/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC64
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.61"
#define DRV_MODULE_RELDATE	"June 29, 2006"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define TX_BUFFS_AVAIL(TP)						\
	((TP)->tx_pending -						\
	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
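
/* Note: the TX and RX rings are power-of-two sized, so the producer/consumer
 * arithmetic in TX_BUFFS_AVAIL() and NEXT_TX() above can use a mask of
 * (ring size - 1) instead of a modulo operation, as described in the comment
 * above TG3_RX_RCB_RING_SIZE().
 */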

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

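/* Register accessors.  tg3_write_indirect_reg32()/tg3_read_indirect_reg32()
 * go through the PCI configuration-space window (TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA) under indirect_lock instead of touching the memory-mapped
 * BAR directly; this path is intended for chips whose workarounds make
 * direct MMIO unsafe (see the PCIX_TARGET_HWBUG / ICH_WORKAROUND checks in
 * _tw32_flush() below).
 */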
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
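
/* The tw32()/tr32() families dispatch through the tp->write32/read32 and
 * mailbox function pointers, so the same call sites work whether the device
 * is accessed by direct MMIO, flush-after-write, or the indirect
 * config-space window.  (The pointers themselves are set up elsewhere in the
 * driver, at probe time, based on which workarounds a given chip needs.)
 */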

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}

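/* Re-enable chip interrupts: clear irq_sync (with a write barrier so the
 * interrupt handler sees it), unmask PCI interrupts, write the last status
 * tag back to the interrupt mailbox (twice when 1-shot MSI is in use), and
 * force an interrupt if the status block says work is already pending.
 */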
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

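/* MII management interface.  tg3_readphy()/tg3_writephy() clock one MII
 * management frame through the MAC_MI_COM register and busy-poll the
 * MI_COM_BUSY bit for up to PHY_BUSY_LOOPS iterations; MAC auto-polling is
 * temporarily disabled around the access and restored afterwards.
 */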
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

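/* The 5703/5704/5705 PHY workaround below writes a fixed test pattern into
 * each of the four DSP channels, reads it back through the DSP read/write
 * port, and requests another PHY reset (via *resetp) if the pattern does not
 * come back intact or the macro-done poll times out.
 */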
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

static void tg3_link_report(struct tg3 *);

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}

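/* Configure the GRC GPIOs that control auxiliary (Vaux) power.  On dual-port
 * devices (5704, 5714) the setting has to take the peer function into
 * account as well, so the peer's WOL/ASF state is looked up through
 * pdev_peer before the GPIOs are switched.
 */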
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

static void tg3_power_down_phy(struct tg3 *tp)
{
	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

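/* Move the device into the requested PCI power state.  For D0 this simply
 * restores full power and switches out of Vaux; for the low-power states it
 * records the current link settings, drops the link to 10/100 where needed,
 * arms Wake-on-LAN and the magic-packet detector if enabled, adjusts the
 * core clocks, powers down the PHY when allowed, and finally writes the PCI
 * PM control register.
 */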
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
		      WOL_DRV_STATE_SHUTDOWN |
		      WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       tp->dev->name,
		       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
		       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

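/* Resolve the TX/RX pause (flow control) configuration from the local and
 * remote autoneg advertisements.  1000BASE-X pause bits are first mapped to
 * their 1000BASE-T equivalents on MII-SERDES devices, and the resulting
 * RX_MODE/TX_MODE flow-control enables are only rewritten when they change.
 */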
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						 TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			    (remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	};
}

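/* Program the copper PHY advertisement registers.  Three cases are handled:
 * entering low-power mode (advertise 10Mb only, plus 100Mb if WOL needs it),
 * autonegotiation with no forced speed (advertise everything the board
 * supports), and a forced speed/duplex, where the BMCR is written directly
 * after briefly polling for the old link to drop.
 */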
1534static void tg3_phy_copper_begin(struct tg3 *tp)
1535{
1536 u32 new_adv;
1537 int i;
1538
1539 if (tp->link_config.phy_is_low_power) {
1540 /* Entering low power mode. Disable gigabit and
1541 * 100baseT advertisements.
1542 */
1543 tg3_writephy(tp, MII_TG3_CTRL, 0);
1544
1545 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1546 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1547 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1548 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1549
1550 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1551 } else if (tp->link_config.speed == SPEED_INVALID) {
1552 tp->link_config.advertising =
1553 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1554 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1555 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1556 ADVERTISED_Autoneg | ADVERTISED_MII);
1557
1558 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1559 tp->link_config.advertising &=
1560 ~(ADVERTISED_1000baseT_Half |
1561 ADVERTISED_1000baseT_Full);
1562
1563 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1564 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1565 new_adv |= ADVERTISE_10HALF;
1566 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1567 new_adv |= ADVERTISE_10FULL;
1568 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1569 new_adv |= ADVERTISE_100HALF;
1570 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1571 new_adv |= ADVERTISE_100FULL;
1572 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1573
1574 if (tp->link_config.advertising &
1575 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1576 new_adv = 0;
1577 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1578 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1579 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1580 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1581 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1582 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1583 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1584 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1585 MII_TG3_CTRL_ENABLE_AS_MASTER);
1586 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1587 } else {
1588 tg3_writephy(tp, MII_TG3_CTRL, 0);
1589 }
1590 } else {
1591 /* Asking for a specific link mode. */
1592 if (tp->link_config.speed == SPEED_1000) {
1593 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1594 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1595
1596 if (tp->link_config.duplex == DUPLEX_FULL)
1597 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1598 else
1599 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1600 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1601 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1602 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1603 MII_TG3_CTRL_ENABLE_AS_MASTER);
1604 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1605 } else {
1606 tg3_writephy(tp, MII_TG3_CTRL, 0);
1607
1608 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1609 if (tp->link_config.speed == SPEED_100) {
1610 if (tp->link_config.duplex == DUPLEX_FULL)
1611 new_adv |= ADVERTISE_100FULL;
1612 else
1613 new_adv |= ADVERTISE_100HALF;
1614 } else {
1615 if (tp->link_config.duplex == DUPLEX_FULL)
1616 new_adv |= ADVERTISE_10FULL;
1617 else
1618 new_adv |= ADVERTISE_10HALF;
1619 }
1620 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1621 }
1622 }
1623
1624 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1625 tp->link_config.speed != SPEED_INVALID) {
1626 u32 bmcr, orig_bmcr;
1627
1628 tp->link_config.active_speed = tp->link_config.speed;
1629 tp->link_config.active_duplex = tp->link_config.duplex;
1630
1631 bmcr = 0;
1632 switch (tp->link_config.speed) {
1633 default:
1634 case SPEED_10:
1635 break;
1636
1637 case SPEED_100:
1638 bmcr |= BMCR_SPEED100;
1639 break;
1640
1641 case SPEED_1000:
1642 bmcr |= TG3_BMCR_SPEED1000;
1643 break;
1644		}
1645
1646 if (tp->link_config.duplex == DUPLEX_FULL)
1647 bmcr |= BMCR_FULLDPLX;
1648
1649 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1650 (bmcr != orig_bmcr)) {
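			/* Force the link down by putting the PHY in loopback,
			 * wait for BMSR to report link down (up to ~15 ms),
			 * then program the forced speed/duplex below.
			 */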
1651 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1652 for (i = 0; i < 1500; i++) {
1653 u32 tmp;
1654
1655 udelay(10);
1656 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1657 tg3_readphy(tp, MII_BMSR, &tmp))
1658 continue;
1659 if (!(tmp & BMSR_LSTATUS)) {
1660 udelay(40);
1661 break;
1662 }
1663 }
1664 tg3_writephy(tp, MII_BMCR, bmcr);
1665 udelay(40);
1666 }
1667 } else {
1668 tg3_writephy(tp, MII_BMCR,
1669 BMCR_ANENABLE | BMCR_ANRESTART);
1670 }
1671}
1672
1673static int tg3_init_5401phy_dsp(struct tg3 *tp)
1674{
1675 int err;
1676
1677 /* Turn off tap power management. */
1678 /* Set Extended packet length bit */
1679 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1680
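	/* The PHY DSP registers are accessed indirectly: write the register
	 * address to MII_TG3_DSP_ADDRESS, then the data to MII_TG3_DSP_RW_PORT.
	 */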
1681 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1682 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1683
1684 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1685 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1686
1687 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1688 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1689
1690 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1691 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1692
1693 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1694 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1695
1696 udelay(40);
1697
1698 return err;
1699}
1700
1701static int tg3_copper_is_advertising_all(struct tg3 *tp)
1702{
1703 u32 adv_reg, all_mask;
1704
1705 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1706 return 0;
1707
1708 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1709 ADVERTISE_100HALF | ADVERTISE_100FULL);
1710 if ((adv_reg & all_mask) != all_mask)
1711 return 0;
1712 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1713 u32 tg3_ctrl;
1714
1715 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1716 return 0;
1717
1718 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1719 MII_TG3_CTRL_ADV_1000_FULL);
1720 if ((tg3_ctrl & all_mask) != all_mask)
1721 return 0;
1722 }
1723 return 1;
1724}
1725
1726static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1727{
1728 int current_link_up;
1729 u32 bmsr, dummy;
1730 u16 current_speed;
1731 u8 current_duplex;
1732 int i, err;
1733
1734 tw32(MAC_EVENT, 0);
1735
1736 tw32_f(MAC_STATUS,
1737 (MAC_STATUS_SYNC_CHANGED |
1738 MAC_STATUS_CFG_CHANGED |
1739 MAC_STATUS_MI_COMPLETION |
1740 MAC_STATUS_LNKSTATE_CHANGED));
1741 udelay(40);
1742
1743 tp->mi_mode = MAC_MI_MODE_BASE;
1744 tw32_f(MAC_MI_MODE, tp->mi_mode);
1745 udelay(80);
1746
1747 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1748
1749 /* Some third-party PHYs need to be reset on link going
1750 * down.
1751 */
1752 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1755 netif_carrier_ok(tp->dev)) {
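		/* BMSR link status is latched low; read it twice so the
		 * second read reflects the current link state.
		 */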
1756 tg3_readphy(tp, MII_BMSR, &bmsr);
1757 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1758 !(bmsr & BMSR_LSTATUS))
1759 force_reset = 1;
1760 }
1761 if (force_reset)
1762 tg3_phy_reset(tp);
1763
1764 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1765 tg3_readphy(tp, MII_BMSR, &bmsr);
1766 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1767 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1768 bmsr = 0;
1769
1770 if (!(bmsr & BMSR_LSTATUS)) {
1771 err = tg3_init_5401phy_dsp(tp);
1772 if (err)
1773 return err;
1774
1775 tg3_readphy(tp, MII_BMSR, &bmsr);
1776 for (i = 0; i < 1000; i++) {
1777 udelay(10);
1778 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1779 (bmsr & BMSR_LSTATUS)) {
1780 udelay(40);
1781 break;
1782 }
1783 }
1784
1785 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1786 !(bmsr & BMSR_LSTATUS) &&
1787 tp->link_config.active_speed == SPEED_1000) {
1788 err = tg3_phy_reset(tp);
1789 if (!err)
1790 err = tg3_init_5401phy_dsp(tp);
1791 if (err)
1792 return err;
1793 }
1794 }
1795 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1796 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1797 /* 5701 {A0,B0} CRC bug workaround */
1798 tg3_writephy(tp, 0x15, 0x0a75);
1799 tg3_writephy(tp, 0x1c, 0x8c68);
1800 tg3_writephy(tp, 0x1c, 0x8d68);
1801 tg3_writephy(tp, 0x1c, 0x8c68);
1802 }
1803
1804 /* Clear pending interrupts... */
1805 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1806 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1807
1808 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1809 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1810 else
1811 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1812
1813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1815 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1816 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1817 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1818 else
1819 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1820 }
1821
1822 current_link_up = 0;
1823 current_speed = SPEED_INVALID;
1824 current_duplex = DUPLEX_INVALID;
1825
1826 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1827 u32 val;
1828
1829 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1830 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1831 if (!(val & (1 << 10))) {
1832 val |= (1 << 10);
1833 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1834 goto relink;
1835 }
1836 }
1837
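	/* Poll for link for up to ~4 ms (100 iterations of 40 us);
	 * BMSR is latched low, hence the back-to-back reads.
	 */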
1838 bmsr = 0;
1839 for (i = 0; i < 100; i++) {
1840 tg3_readphy(tp, MII_BMSR, &bmsr);
1841 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1842 (bmsr & BMSR_LSTATUS))
1843 break;
1844 udelay(40);
1845 }
1846
1847 if (bmsr & BMSR_LSTATUS) {
1848 u32 aux_stat, bmcr;
1849
1850 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1851 for (i = 0; i < 2000; i++) {
1852 udelay(10);
1853 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1854 aux_stat)
1855 break;
1856 }
1857
1858 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1859 &current_speed,
1860 &current_duplex);
1861
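		/* Re-read BMCR until it returns something other than 0 or
		 * 0x7fff, which the loop treats as a not-yet-valid read.
		 */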
1862 bmcr = 0;
1863 for (i = 0; i < 200; i++) {
1864 tg3_readphy(tp, MII_BMCR, &bmcr);
1865 if (tg3_readphy(tp, MII_BMCR, &bmcr))
1866 continue;
1867 if (bmcr && bmcr != 0x7fff)
1868 break;
1869 udelay(10);
1870 }
1871
1872 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1873 if (bmcr & BMCR_ANENABLE) {
1874 current_link_up = 1;
1875
1876 /* Force autoneg restart if we are exiting
1877 * low power mode.
1878 */
1879 if (!tg3_copper_is_advertising_all(tp))
1880 current_link_up = 0;
1881 } else {
1882 current_link_up = 0;
1883 }
1884 } else {
1885 if (!(bmcr & BMCR_ANENABLE) &&
1886 tp->link_config.speed == current_speed &&
1887 tp->link_config.duplex == current_duplex) {
1888 current_link_up = 1;
1889 } else {
1890 current_link_up = 0;
1891 }
1892 }
1893
1894 tp->link_config.active_speed = current_speed;
1895 tp->link_config.active_duplex = current_duplex;
1896 }
1897
1898 if (current_link_up == 1 &&
1899 (tp->link_config.active_duplex == DUPLEX_FULL) &&
1900 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1901 u32 local_adv, remote_adv;
1902
1903 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1904 local_adv = 0;
1905 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1906
1907 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1908 remote_adv = 0;
1909
1910 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1911
1912 /* If we are not advertising full pause capability,
1913 * something is wrong. Bring the link down and reconfigure.
1914 */
1915 if (local_adv != ADVERTISE_PAUSE_CAP) {
1916 current_link_up = 0;
1917 } else {
1918 tg3_setup_flow_control(tp, local_adv, remote_adv);
1919 }
1920 }
1921relink:
Michael Chan6921d202005-12-13 21:15:53 -08001922 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 u32 tmp;
1924
1925 tg3_phy_copper_begin(tp);
1926
1927 tg3_readphy(tp, MII_BMSR, &tmp);
1928 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1929 (tmp & BMSR_LSTATUS))
1930 current_link_up = 1;
1931 }
1932
1933 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1934 if (current_link_up == 1) {
1935 if (tp->link_config.active_speed == SPEED_100 ||
1936 tp->link_config.active_speed == SPEED_10)
1937 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1938 else
1939 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1940 } else
1941 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1942
1943 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1944 if (tp->link_config.active_duplex == DUPLEX_HALF)
1945 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1946
1947 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1949 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1950 (current_link_up == 1 &&
1951 tp->link_config.active_speed == SPEED_10))
1952 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1953 } else {
1954 if (current_link_up == 1)
1955 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1956 }
1957
1958 /* ??? Without this setting Netgear GA302T PHY does not
1959 * ??? send/receive packets...
1960 */
1961 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1962 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1963 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1964 tw32_f(MAC_MI_MODE, tp->mi_mode);
1965 udelay(80);
1966 }
1967
1968 tw32_f(MAC_MODE, tp->mac_mode);
1969 udelay(40);
1970
1971 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1972 /* Polled via timer. */
1973 tw32_f(MAC_EVENT, 0);
1974 } else {
1975 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1976 }
1977 udelay(40);
1978
1979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1980 current_link_up == 1 &&
1981 tp->link_config.active_speed == SPEED_1000 &&
1982 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1983 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1984 udelay(120);
1985 tw32_f(MAC_STATUS,
1986 (MAC_STATUS_SYNC_CHANGED |
1987 MAC_STATUS_CFG_CHANGED));
1988 udelay(40);
1989 tg3_write_mem(tp,
1990 NIC_SRAM_FIRMWARE_MBOX,
1991 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1992 }
1993
1994 if (current_link_up != netif_carrier_ok(tp->dev)) {
1995 if (current_link_up)
1996 netif_carrier_on(tp->dev);
1997 else
1998 netif_carrier_off(tp->dev);
1999 tg3_link_report(tp);
2000 }
2001
2002 return 0;
2003}
2004
2005struct tg3_fiber_aneginfo {
2006 int state;
2007#define ANEG_STATE_UNKNOWN 0
2008#define ANEG_STATE_AN_ENABLE 1
2009#define ANEG_STATE_RESTART_INIT 2
2010#define ANEG_STATE_RESTART 3
2011#define ANEG_STATE_DISABLE_LINK_OK 4
2012#define ANEG_STATE_ABILITY_DETECT_INIT 5
2013#define ANEG_STATE_ABILITY_DETECT 6
2014#define ANEG_STATE_ACK_DETECT_INIT 7
2015#define ANEG_STATE_ACK_DETECT 8
2016#define ANEG_STATE_COMPLETE_ACK_INIT 9
2017#define ANEG_STATE_COMPLETE_ACK 10
2018#define ANEG_STATE_IDLE_DETECT_INIT 11
2019#define ANEG_STATE_IDLE_DETECT 12
2020#define ANEG_STATE_LINK_OK 13
2021#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2022#define ANEG_STATE_NEXT_PAGE_WAIT 15
2023
2024 u32 flags;
2025#define MR_AN_ENABLE 0x00000001
2026#define MR_RESTART_AN 0x00000002
2027#define MR_AN_COMPLETE 0x00000004
2028#define MR_PAGE_RX 0x00000008
2029#define MR_NP_LOADED 0x00000010
2030#define MR_TOGGLE_TX 0x00000020
2031#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2032#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2033#define MR_LP_ADV_SYM_PAUSE 0x00000100
2034#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2035#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2036#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2037#define MR_LP_ADV_NEXT_PAGE 0x00001000
2038#define MR_TOGGLE_RX 0x00002000
2039#define MR_NP_RX 0x00004000
2040
2041#define MR_LINK_OK 0x80000000
2042
2043 unsigned long link_time, cur_time;
2044
2045 u32 ability_match_cfg;
2046 int ability_match_count;
2047
2048 char ability_match, idle_match, ack_match;
2049
2050 u32 txconfig, rxconfig;
2051#define ANEG_CFG_NP 0x00000080
2052#define ANEG_CFG_ACK 0x00000040
2053#define ANEG_CFG_RF2 0x00000020
2054#define ANEG_CFG_RF1 0x00000010
2055#define ANEG_CFG_PS2 0x00000001
2056#define ANEG_CFG_PS1 0x00008000
2057#define ANEG_CFG_HD 0x00004000
2058#define ANEG_CFG_FD 0x00002000
2059#define ANEG_CFG_INVAL 0x00001f06
2060
2061};
2062#define ANEG_OK 0
2063#define ANEG_DONE 1
2064#define ANEG_TIMER_ENAB 2
2065#define ANEG_FAILED -1
2066
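/* The settle time is counted in state machine ticks; fiber_autoneg()
 * advances one tick per microsecond, so 10000 ticks is roughly 10 ms.
 */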
2067#define ANEG_STATE_SETTLE_TIME 10000
2068
2069static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2070 struct tg3_fiber_aneginfo *ap)
2071{
2072 unsigned long delta;
2073 u32 rx_cfg_reg;
2074 int ret;
2075
2076 if (ap->state == ANEG_STATE_UNKNOWN) {
2077 ap->rxconfig = 0;
2078 ap->link_time = 0;
2079 ap->cur_time = 0;
2080 ap->ability_match_cfg = 0;
2081 ap->ability_match_count = 0;
2082 ap->ability_match = 0;
2083 ap->idle_match = 0;
2084 ap->ack_match = 0;
2085 }
2086 ap->cur_time++;
2087
2088 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2089 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2090
2091 if (rx_cfg_reg != ap->ability_match_cfg) {
2092 ap->ability_match_cfg = rx_cfg_reg;
2093 ap->ability_match = 0;
2094 ap->ability_match_count = 0;
2095 } else {
2096 if (++ap->ability_match_count > 1) {
2097 ap->ability_match = 1;
2098 ap->ability_match_cfg = rx_cfg_reg;
2099 }
2100 }
2101 if (rx_cfg_reg & ANEG_CFG_ACK)
2102 ap->ack_match = 1;
2103 else
2104 ap->ack_match = 0;
2105
2106 ap->idle_match = 0;
2107 } else {
2108 ap->idle_match = 1;
2109 ap->ability_match_cfg = 0;
2110 ap->ability_match_count = 0;
2111 ap->ability_match = 0;
2112 ap->ack_match = 0;
2113
2114 rx_cfg_reg = 0;
2115 }
2116
2117 ap->rxconfig = rx_cfg_reg;
2118 ret = ANEG_OK;
2119
2120 switch(ap->state) {
2121 case ANEG_STATE_UNKNOWN:
2122 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2123 ap->state = ANEG_STATE_AN_ENABLE;
2124
2125 /* fallthru */
2126 case ANEG_STATE_AN_ENABLE:
2127 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2128 if (ap->flags & MR_AN_ENABLE) {
2129 ap->link_time = 0;
2130 ap->cur_time = 0;
2131 ap->ability_match_cfg = 0;
2132 ap->ability_match_count = 0;
2133 ap->ability_match = 0;
2134 ap->idle_match = 0;
2135 ap->ack_match = 0;
2136
2137 ap->state = ANEG_STATE_RESTART_INIT;
2138 } else {
2139 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2140 }
2141 break;
2142
2143 case ANEG_STATE_RESTART_INIT:
2144 ap->link_time = ap->cur_time;
2145 ap->flags &= ~(MR_NP_LOADED);
2146 ap->txconfig = 0;
2147 tw32(MAC_TX_AUTO_NEG, 0);
2148 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2149 tw32_f(MAC_MODE, tp->mac_mode);
2150 udelay(40);
2151
2152 ret = ANEG_TIMER_ENAB;
2153 ap->state = ANEG_STATE_RESTART;
2154
2155 /* fallthru */
2156 case ANEG_STATE_RESTART:
2157 delta = ap->cur_time - ap->link_time;
2158 if (delta > ANEG_STATE_SETTLE_TIME) {
2159 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2160 } else {
2161 ret = ANEG_TIMER_ENAB;
2162 }
2163 break;
2164
2165 case ANEG_STATE_DISABLE_LINK_OK:
2166 ret = ANEG_DONE;
2167 break;
2168
2169 case ANEG_STATE_ABILITY_DETECT_INIT:
2170 ap->flags &= ~(MR_TOGGLE_TX);
2171 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2172 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2173 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2174 tw32_f(MAC_MODE, tp->mac_mode);
2175 udelay(40);
2176
2177 ap->state = ANEG_STATE_ABILITY_DETECT;
2178 break;
2179
2180 case ANEG_STATE_ABILITY_DETECT:
2181 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2182 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2183 }
2184 break;
2185
2186 case ANEG_STATE_ACK_DETECT_INIT:
2187 ap->txconfig |= ANEG_CFG_ACK;
2188 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2189 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2190 tw32_f(MAC_MODE, tp->mac_mode);
2191 udelay(40);
2192
2193 ap->state = ANEG_STATE_ACK_DETECT;
2194
2195 /* fallthru */
2196 case ANEG_STATE_ACK_DETECT:
2197 if (ap->ack_match != 0) {
2198 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2199 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2200 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2201 } else {
2202 ap->state = ANEG_STATE_AN_ENABLE;
2203 }
2204 } else if (ap->ability_match != 0 &&
2205 ap->rxconfig == 0) {
2206 ap->state = ANEG_STATE_AN_ENABLE;
2207 }
2208 break;
2209
2210 case ANEG_STATE_COMPLETE_ACK_INIT:
2211 if (ap->rxconfig & ANEG_CFG_INVAL) {
2212 ret = ANEG_FAILED;
2213 break;
2214 }
2215 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2216 MR_LP_ADV_HALF_DUPLEX |
2217 MR_LP_ADV_SYM_PAUSE |
2218 MR_LP_ADV_ASYM_PAUSE |
2219 MR_LP_ADV_REMOTE_FAULT1 |
2220 MR_LP_ADV_REMOTE_FAULT2 |
2221 MR_LP_ADV_NEXT_PAGE |
2222 MR_TOGGLE_RX |
2223 MR_NP_RX);
2224 if (ap->rxconfig & ANEG_CFG_FD)
2225 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2226 if (ap->rxconfig & ANEG_CFG_HD)
2227 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2228 if (ap->rxconfig & ANEG_CFG_PS1)
2229 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2230 if (ap->rxconfig & ANEG_CFG_PS2)
2231 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2232 if (ap->rxconfig & ANEG_CFG_RF1)
2233 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2234 if (ap->rxconfig & ANEG_CFG_RF2)
2235 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2236 if (ap->rxconfig & ANEG_CFG_NP)
2237 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2238
2239 ap->link_time = ap->cur_time;
2240
2241 ap->flags ^= (MR_TOGGLE_TX);
2242 if (ap->rxconfig & 0x0008)
2243 ap->flags |= MR_TOGGLE_RX;
2244 if (ap->rxconfig & ANEG_CFG_NP)
2245 ap->flags |= MR_NP_RX;
2246 ap->flags |= MR_PAGE_RX;
2247
2248 ap->state = ANEG_STATE_COMPLETE_ACK;
2249 ret = ANEG_TIMER_ENAB;
2250 break;
2251
2252 case ANEG_STATE_COMPLETE_ACK:
2253 if (ap->ability_match != 0 &&
2254 ap->rxconfig == 0) {
2255 ap->state = ANEG_STATE_AN_ENABLE;
2256 break;
2257 }
2258 delta = ap->cur_time - ap->link_time;
2259 if (delta > ANEG_STATE_SETTLE_TIME) {
2260 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2261 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2262 } else {
2263 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2264 !(ap->flags & MR_NP_RX)) {
2265 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2266 } else {
2267 ret = ANEG_FAILED;
2268 }
2269 }
2270 }
2271 break;
2272
2273 case ANEG_STATE_IDLE_DETECT_INIT:
2274 ap->link_time = ap->cur_time;
2275 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2276 tw32_f(MAC_MODE, tp->mac_mode);
2277 udelay(40);
2278
2279 ap->state = ANEG_STATE_IDLE_DETECT;
2280 ret = ANEG_TIMER_ENAB;
2281 break;
2282
2283 case ANEG_STATE_IDLE_DETECT:
2284 if (ap->ability_match != 0 &&
2285 ap->rxconfig == 0) {
2286 ap->state = ANEG_STATE_AN_ENABLE;
2287 break;
2288 }
2289 delta = ap->cur_time - ap->link_time;
2290 if (delta > ANEG_STATE_SETTLE_TIME) {
2291 /* XXX another gem from the Broadcom driver :( */
2292 ap->state = ANEG_STATE_LINK_OK;
2293 }
2294 break;
2295
2296 case ANEG_STATE_LINK_OK:
2297 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2298 ret = ANEG_DONE;
2299 break;
2300
2301 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2302 /* ??? unimplemented */
2303 break;
2304
2305 case ANEG_STATE_NEXT_PAGE_WAIT:
2306 /* ??? unimplemented */
2307 break;
2308
2309 default:
2310 ret = ANEG_FAILED;
2311 break;
2312	}
2313
2314 return ret;
2315}
2316
2317static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2318{
2319 int res = 0;
2320 struct tg3_fiber_aneginfo aninfo;
2321 int status = ANEG_FAILED;
2322 unsigned int tick;
2323 u32 tmp;
2324
2325 tw32_f(MAC_TX_AUTO_NEG, 0);
2326
2327 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2328 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2329 udelay(40);
2330
2331 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2332 udelay(40);
2333
2334 memset(&aninfo, 0, sizeof(aninfo));
2335 aninfo.flags |= MR_AN_ENABLE;
2336 aninfo.state = ANEG_STATE_UNKNOWN;
2337 aninfo.cur_time = 0;
2338 tick = 0;
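	/* Run the autoneg state machine for up to ~195 ms, one tick per
	 * microsecond, until it reports done or failed.
	 */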
2339 while (++tick < 195000) {
2340 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2341 if (status == ANEG_DONE || status == ANEG_FAILED)
2342 break;
2343
2344 udelay(1);
2345 }
2346
2347 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2348 tw32_f(MAC_MODE, tp->mac_mode);
2349 udelay(40);
2350
2351 *flags = aninfo.flags;
2352
2353 if (status == ANEG_DONE &&
2354 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2355 MR_LP_ADV_FULL_DUPLEX)))
2356 res = 1;
2357
2358 return res;
2359}
2360
2361static void tg3_init_bcm8002(struct tg3 *tp)
2362{
2363 u32 mac_status = tr32(MAC_STATUS);
2364 int i;
2365
2366	/* Reset when initializing for the first time or when we have a link. */
2367 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2368 !(mac_status & MAC_STATUS_PCS_SYNCED))
2369 return;
2370
2371 /* Set PLL lock range. */
2372 tg3_writephy(tp, 0x16, 0x8007);
2373
2374 /* SW reset */
2375 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2376
2377 /* Wait for reset to complete. */
2378 /* XXX schedule_timeout() ... */
2379 for (i = 0; i < 500; i++)
2380 udelay(10);
2381
2382 /* Config mode; select PMA/Ch 1 regs. */
2383 tg3_writephy(tp, 0x10, 0x8411);
2384
2385 /* Enable auto-lock and comdet, select txclk for tx. */
2386 tg3_writephy(tp, 0x11, 0x0a10);
2387
2388 tg3_writephy(tp, 0x18, 0x00a0);
2389 tg3_writephy(tp, 0x16, 0x41ff);
2390
2391 /* Assert and deassert POR. */
2392 tg3_writephy(tp, 0x13, 0x0400);
2393 udelay(40);
2394 tg3_writephy(tp, 0x13, 0x0000);
2395
2396 tg3_writephy(tp, 0x11, 0x0a50);
2397 udelay(40);
2398 tg3_writephy(tp, 0x11, 0x0a10);
2399
2400 /* Wait for signal to stabilize */
2401 /* XXX schedule_timeout() ... */
2402 for (i = 0; i < 15000; i++)
2403 udelay(10);
2404
2405 /* Deselect the channel register so we can read the PHYID
2406 * later.
2407 */
2408 tg3_writephy(tp, 0x10, 0x8011);
2409}
2410
2411static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2412{
2413 u32 sg_dig_ctrl, sg_dig_status;
2414 u32 serdes_cfg, expected_sg_dig_ctrl;
2415 int workaround, port_a;
2416 int current_link_up;
2417
2418 serdes_cfg = 0;
2419 expected_sg_dig_ctrl = 0;
2420 workaround = 0;
2421 port_a = 1;
2422 current_link_up = 0;
2423
2424 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2425 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2426 workaround = 1;
2427 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2428 port_a = 0;
2429
2430 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2431 /* preserve bits 20-23 for voltage regulator */
2432 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2433 }
2434
2435 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2436
2437 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2438 if (sg_dig_ctrl & (1 << 31)) {
2439 if (workaround) {
2440 u32 val = serdes_cfg;
2441
2442 if (port_a)
2443 val |= 0xc010000;
2444 else
2445 val |= 0x4010000;
2446 tw32_f(MAC_SERDES_CFG, val);
2447 }
2448 tw32_f(SG_DIG_CTRL, 0x01388400);
2449 }
2450 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2451 tg3_setup_flow_control(tp, 0, 0);
2452 current_link_up = 1;
2453 }
2454 goto out;
2455 }
2456
2457 /* Want auto-negotiation. */
2458 expected_sg_dig_ctrl = 0x81388400;
2459
2460 /* Pause capability */
2461 expected_sg_dig_ctrl |= (1 << 11);
2462
2463	/* Asymmetric pause */
2464 expected_sg_dig_ctrl |= (1 << 12);
2465
2466 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2467 if (workaround)
2468 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2469 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2470 udelay(5);
2471 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2472
2473 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2474 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2475 MAC_STATUS_SIGNAL_DET)) {
2476 int i;
2477
2478		/* Give the link time to negotiate (~200ms) */
2479 for (i = 0; i < 40000; i++) {
2480 sg_dig_status = tr32(SG_DIG_STATUS);
2481 if (sg_dig_status & (0x3))
2482 break;
2483 udelay(5);
2484 }
2485 mac_status = tr32(MAC_STATUS);
2486
2487 if ((sg_dig_status & (1 << 1)) &&
2488 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2489 u32 local_adv, remote_adv;
2490
2491 local_adv = ADVERTISE_PAUSE_CAP;
2492 remote_adv = 0;
2493 if (sg_dig_status & (1 << 19))
2494 remote_adv |= LPA_PAUSE_CAP;
2495 if (sg_dig_status & (1 << 20))
2496 remote_adv |= LPA_PAUSE_ASYM;
2497
2498 tg3_setup_flow_control(tp, local_adv, remote_adv);
2499 current_link_up = 1;
2500 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2501 } else if (!(sg_dig_status & (1 << 1))) {
2502 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2503 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2504 else {
2505 if (workaround) {
2506 u32 val = serdes_cfg;
2507
2508 if (port_a)
2509 val |= 0xc010000;
2510 else
2511 val |= 0x4010000;
2512
2513 tw32_f(MAC_SERDES_CFG, val);
2514 }
2515
2516 tw32_f(SG_DIG_CTRL, 0x01388400);
2517 udelay(40);
2518
2519 /* Link parallel detection - link is up */
2520 /* only if we have PCS_SYNC and not */
2521 /* receiving config code words */
2522 mac_status = tr32(MAC_STATUS);
2523 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2524 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2525 tg3_setup_flow_control(tp, 0, 0);
2526 current_link_up = 1;
2527 }
2528 }
2529 }
2530 }
2531
2532out:
2533 return current_link_up;
2534}
2535
2536static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2537{
2538 int current_link_up = 0;
2539
2540 if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2541 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2542 goto out;
2543 }
2544
2545 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2546 u32 flags;
2547 int i;
2548
2549 if (fiber_autoneg(tp, &flags)) {
2550 u32 local_adv, remote_adv;
2551
2552 local_adv = ADVERTISE_PAUSE_CAP;
2553 remote_adv = 0;
2554 if (flags & MR_LP_ADV_SYM_PAUSE)
2555 remote_adv |= LPA_PAUSE_CAP;
2556 if (flags & MR_LP_ADV_ASYM_PAUSE)
2557 remote_adv |= LPA_PAUSE_ASYM;
2558
2559 tg3_setup_flow_control(tp, local_adv, remote_adv);
2560
2561 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2562 current_link_up = 1;
2563 }
2564 for (i = 0; i < 30; i++) {
2565 udelay(20);
2566 tw32_f(MAC_STATUS,
2567 (MAC_STATUS_SYNC_CHANGED |
2568 MAC_STATUS_CFG_CHANGED));
2569 udelay(40);
2570 if ((tr32(MAC_STATUS) &
2571 (MAC_STATUS_SYNC_CHANGED |
2572 MAC_STATUS_CFG_CHANGED)) == 0)
2573 break;
2574 }
2575
2576 mac_status = tr32(MAC_STATUS);
2577 if (current_link_up == 0 &&
2578 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2579 !(mac_status & MAC_STATUS_RCVD_CFG))
2580 current_link_up = 1;
2581 } else {
2582 /* Forcing 1000FD link up. */
2583 current_link_up = 1;
2584 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2585
2586 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2587 udelay(40);
2588 }
2589
2590out:
2591 return current_link_up;
2592}
2593
2594static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2595{
2596 u32 orig_pause_cfg;
2597 u16 orig_active_speed;
2598 u8 orig_active_duplex;
2599 u32 mac_status;
2600 int current_link_up;
2601 int i;
2602
2603 orig_pause_cfg =
2604 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2605 TG3_FLAG_TX_PAUSE));
2606 orig_active_speed = tp->link_config.active_speed;
2607 orig_active_duplex = tp->link_config.active_duplex;
2608
2609 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2610 netif_carrier_ok(tp->dev) &&
2611 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2612 mac_status = tr32(MAC_STATUS);
2613 mac_status &= (MAC_STATUS_PCS_SYNCED |
2614 MAC_STATUS_SIGNAL_DET |
2615 MAC_STATUS_CFG_CHANGED |
2616 MAC_STATUS_RCVD_CFG);
2617 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2618 MAC_STATUS_SIGNAL_DET)) {
2619 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2620 MAC_STATUS_CFG_CHANGED));
2621 return 0;
2622 }
2623 }
2624
2625 tw32_f(MAC_TX_AUTO_NEG, 0);
2626
2627 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2628 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2629 tw32_f(MAC_MODE, tp->mac_mode);
2630 udelay(40);
2631
2632 if (tp->phy_id == PHY_ID_BCM8002)
2633 tg3_init_bcm8002(tp);
2634
2635 /* Enable link change event even when serdes polling. */
2636 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2637 udelay(40);
2638
2639 current_link_up = 0;
2640 mac_status = tr32(MAC_STATUS);
2641
2642 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2643 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2644 else
2645 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2646
2647 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2648 tw32_f(MAC_MODE, tp->mac_mode);
2649 udelay(40);
2650
2651 tp->hw_status->status =
2652 (SD_STATUS_UPDATED |
2653 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2654
2655 for (i = 0; i < 100; i++) {
2656 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2657 MAC_STATUS_CFG_CHANGED));
2658 udelay(5);
2659 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2660 MAC_STATUS_CFG_CHANGED)) == 0)
2661 break;
2662 }
2663
2664 mac_status = tr32(MAC_STATUS);
2665 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2666 current_link_up = 0;
2667 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2668 tw32_f(MAC_MODE, (tp->mac_mode |
2669 MAC_MODE_SEND_CONFIGS));
2670 udelay(1);
2671 tw32_f(MAC_MODE, tp->mac_mode);
2672 }
2673 }
2674
2675 if (current_link_up == 1) {
2676 tp->link_config.active_speed = SPEED_1000;
2677 tp->link_config.active_duplex = DUPLEX_FULL;
2678 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2679 LED_CTRL_LNKLED_OVERRIDE |
2680 LED_CTRL_1000MBPS_ON));
2681 } else {
2682 tp->link_config.active_speed = SPEED_INVALID;
2683 tp->link_config.active_duplex = DUPLEX_INVALID;
2684 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2685 LED_CTRL_LNKLED_OVERRIDE |
2686 LED_CTRL_TRAFFIC_OVERRIDE));
2687 }
2688
2689 if (current_link_up != netif_carrier_ok(tp->dev)) {
2690 if (current_link_up)
2691 netif_carrier_on(tp->dev);
2692 else
2693 netif_carrier_off(tp->dev);
2694 tg3_link_report(tp);
2695 } else {
2696 u32 now_pause_cfg =
2697 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2698 TG3_FLAG_TX_PAUSE);
2699 if (orig_pause_cfg != now_pause_cfg ||
2700 orig_active_speed != tp->link_config.active_speed ||
2701 orig_active_duplex != tp->link_config.active_duplex)
2702 tg3_link_report(tp);
2703 }
2704
2705 return 0;
2706}
2707
Michael Chan747e8f82005-07-25 12:33:22 -07002708static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2709{
2710 int current_link_up, err = 0;
2711 u32 bmsr, bmcr;
2712 u16 current_speed;
2713 u8 current_duplex;
2714
2715 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2716 tw32_f(MAC_MODE, tp->mac_mode);
2717 udelay(40);
2718
2719 tw32(MAC_EVENT, 0);
2720
2721 tw32_f(MAC_STATUS,
2722 (MAC_STATUS_SYNC_CHANGED |
2723 MAC_STATUS_CFG_CHANGED |
2724 MAC_STATUS_MI_COMPLETION |
2725 MAC_STATUS_LNKSTATE_CHANGED));
2726 udelay(40);
2727
2728 if (force_reset)
2729 tg3_phy_reset(tp);
2730
2731 current_link_up = 0;
2732 current_speed = SPEED_INVALID;
2733 current_duplex = DUPLEX_INVALID;
2734
2735 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2736 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08002737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2738 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2739 bmsr |= BMSR_LSTATUS;
2740 else
2741 bmsr &= ~BMSR_LSTATUS;
2742 }
Michael Chan747e8f82005-07-25 12:33:22 -07002743
2744 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2745
2746 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2747 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2748 /* do nothing, just check for link up at the end */
2749 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2750 u32 adv, new_adv;
2751
2752 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2753 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2754 ADVERTISE_1000XPAUSE |
2755 ADVERTISE_1000XPSE_ASYM |
2756 ADVERTISE_SLCT);
2757
2758 /* Always advertise symmetric PAUSE just like copper */
2759 new_adv |= ADVERTISE_1000XPAUSE;
2760
2761 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2762 new_adv |= ADVERTISE_1000XHALF;
2763 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2764 new_adv |= ADVERTISE_1000XFULL;
2765
2766 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2767 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2768 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2769 tg3_writephy(tp, MII_BMCR, bmcr);
2770
2771 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2772 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2773 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2774
2775 return err;
2776 }
2777 } else {
2778 u32 new_bmcr;
2779
2780 bmcr &= ~BMCR_SPEED1000;
2781 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2782
2783 if (tp->link_config.duplex == DUPLEX_FULL)
2784 new_bmcr |= BMCR_FULLDPLX;
2785
2786 if (new_bmcr != bmcr) {
2787 /* BMCR_SPEED1000 is a reserved bit that needs
2788 * to be set on write.
2789 */
2790 new_bmcr |= BMCR_SPEED1000;
2791
2792 /* Force a linkdown */
2793 if (netif_carrier_ok(tp->dev)) {
2794 u32 adv;
2795
2796 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2797 adv &= ~(ADVERTISE_1000XFULL |
2798 ADVERTISE_1000XHALF |
2799 ADVERTISE_SLCT);
2800 tg3_writephy(tp, MII_ADVERTISE, adv);
2801 tg3_writephy(tp, MII_BMCR, bmcr |
2802 BMCR_ANRESTART |
2803 BMCR_ANENABLE);
2804 udelay(10);
2805 netif_carrier_off(tp->dev);
2806 }
2807 tg3_writephy(tp, MII_BMCR, new_bmcr);
2808 bmcr = new_bmcr;
2809 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2810 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08002811 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2812 ASIC_REV_5714) {
2813 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2814 bmsr |= BMSR_LSTATUS;
2815 else
2816 bmsr &= ~BMSR_LSTATUS;
2817 }
Michael Chan747e8f82005-07-25 12:33:22 -07002818 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2819 }
2820 }
2821
2822 if (bmsr & BMSR_LSTATUS) {
2823 current_speed = SPEED_1000;
2824 current_link_up = 1;
2825 if (bmcr & BMCR_FULLDPLX)
2826 current_duplex = DUPLEX_FULL;
2827 else
2828 current_duplex = DUPLEX_HALF;
2829
2830 if (bmcr & BMCR_ANENABLE) {
2831 u32 local_adv, remote_adv, common;
2832
2833 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2834 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2835 common = local_adv & remote_adv;
2836 if (common & (ADVERTISE_1000XHALF |
2837 ADVERTISE_1000XFULL)) {
2838 if (common & ADVERTISE_1000XFULL)
2839 current_duplex = DUPLEX_FULL;
2840 else
2841 current_duplex = DUPLEX_HALF;
2842
2843 tg3_setup_flow_control(tp, local_adv,
2844 remote_adv);
2845 }
2846 else
2847 current_link_up = 0;
2848 }
2849 }
2850
2851 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2852 if (tp->link_config.active_duplex == DUPLEX_HALF)
2853 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2854
2855 tw32_f(MAC_MODE, tp->mac_mode);
2856 udelay(40);
2857
2858 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2859
2860 tp->link_config.active_speed = current_speed;
2861 tp->link_config.active_duplex = current_duplex;
2862
2863 if (current_link_up != netif_carrier_ok(tp->dev)) {
2864 if (current_link_up)
2865 netif_carrier_on(tp->dev);
2866 else {
2867 netif_carrier_off(tp->dev);
2868 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2869 }
2870 tg3_link_report(tp);
2871 }
2872 return err;
2873}
2874
2875static void tg3_serdes_parallel_detect(struct tg3 *tp)
2876{
2877 if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2878 /* Give autoneg time to complete. */
2879 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2880 return;
2881 }
2882 if (!netif_carrier_ok(tp->dev) &&
2883 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2884 u32 bmcr;
2885
2886 tg3_readphy(tp, MII_BMCR, &bmcr);
2887 if (bmcr & BMCR_ANENABLE) {
2888 u32 phy1, phy2;
2889
2890 /* Select shadow register 0x1f */
2891 tg3_writephy(tp, 0x1c, 0x7c00);
2892 tg3_readphy(tp, 0x1c, &phy1);
2893
2894 /* Select expansion interrupt status register */
2895 tg3_writephy(tp, 0x17, 0x0f01);
2896 tg3_readphy(tp, 0x15, &phy2);
2897 tg3_readphy(tp, 0x15, &phy2);
2898
2899 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2900 /* We have signal detect and not receiving
2901 * config code words, link is up by parallel
2902 * detection.
2903 */
2904
2905 bmcr &= ~BMCR_ANENABLE;
2906 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2907 tg3_writephy(tp, MII_BMCR, bmcr);
2908 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2909 }
2910 }
2911 }
2912 else if (netif_carrier_ok(tp->dev) &&
2913 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2914 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2915 u32 phy2;
2916
2917 /* Select expansion interrupt status register */
2918 tg3_writephy(tp, 0x17, 0x0f01);
2919 tg3_readphy(tp, 0x15, &phy2);
2920 if (phy2 & 0x20) {
2921 u32 bmcr;
2922
2923 /* Config code words received, turn on autoneg. */
2924 tg3_readphy(tp, MII_BMCR, &bmcr);
2925 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2926
2927 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2928
2929 }
2930 }
2931}
2932
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2934{
2935 int err;
2936
2937 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2938 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07002939 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2940 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941 } else {
2942 err = tg3_setup_copper_phy(tp, force_reset);
2943 }
2944
2945 if (tp->link_config.active_speed == SPEED_1000 &&
2946 tp->link_config.active_duplex == DUPLEX_HALF)
2947 tw32(MAC_TX_LENGTHS,
2948 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2949 (6 << TX_LENGTHS_IPG_SHIFT) |
2950 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2951 else
2952 tw32(MAC_TX_LENGTHS,
2953 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2954 (6 << TX_LENGTHS_IPG_SHIFT) |
2955 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2956
2957 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2958 if (netif_carrier_ok(tp->dev)) {
2959 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07002960 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 } else {
2962 tw32(HOSTCC_STAT_COAL_TICKS, 0);
2963 }
2964 }
2965
2966 return err;
2967}
2968
Michael Chandf3e6542006-05-26 17:48:07 -07002969/* This is called whenever we suspect that the system chipset is re-
2970 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2971 * is bogus tx completions. We try to recover by setting the
2972 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2973 * in the workqueue.
2974 */
2975static void tg3_tx_recover(struct tg3 *tp)
2976{
2977 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2978 tp->write32_tx_mbox == tg3_write_indirect_mbox);
2979
2980 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2981 "mapped I/O cycles to the network device, attempting to "
2982 "recover. Please report the problem to the driver maintainer "
2983 "and include system chipset information.\n", tp->dev->name);
2984
2985 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07002986 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07002987 spin_unlock(&tp->lock);
2988}
2989
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990/* Tigon3 never reports partial packet sends. So we do not
2991 * need special logic to handle SKBs that have not had all
2992 * of their frags sent yet, like SunGEM does.
2993 */
2994static void tg3_tx(struct tg3 *tp)
2995{
2996 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2997 u32 sw_idx = tp->tx_cons;
2998
2999 while (sw_idx != hw_idx) {
3000 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3001 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003002 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003
Michael Chandf3e6542006-05-26 17:48:07 -07003004 if (unlikely(skb == NULL)) {
3005 tg3_tx_recover(tp);
3006 return;
3007 }
3008
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 pci_unmap_single(tp->pdev,
3010 pci_unmap_addr(ri, mapping),
3011 skb_headlen(skb),
3012 PCI_DMA_TODEVICE);
3013
3014 ri->skb = NULL;
3015
3016 sw_idx = NEXT_TX(sw_idx);
3017
3018 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003020 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3021 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022
3023 pci_unmap_page(tp->pdev,
3024 pci_unmap_addr(ri, mapping),
3025 skb_shinfo(skb)->frags[i].size,
3026 PCI_DMA_TODEVICE);
3027
3028 sw_idx = NEXT_TX(sw_idx);
3029 }
3030
David S. Millerf47c11e2005-06-24 20:18:35 -07003031 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003032
3033 if (unlikely(tx_bug)) {
3034 tg3_tx_recover(tp);
3035 return;
3036 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 }
3038
3039 tp->tx_cons = sw_idx;
3040
Michael Chan51b91462005-09-01 17:41:28 -07003041 if (unlikely(netif_queue_stopped(tp->dev))) {
3042 spin_lock(&tp->tx_lock);
3043 if (netif_queue_stopped(tp->dev) &&
3044 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3045 netif_wake_queue(tp->dev);
3046 spin_unlock(&tp->tx_lock);
3047 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048}
3049
3050/* Returns size of skb allocated or < 0 on error.
3051 *
3052 * We only need to fill in the address because the other members
3053 * of the RX descriptor are invariant, see tg3_init_rings.
3054 *
3055 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3056 * posting buffers we only dirty the first cache line of the RX
3057 * descriptor (containing the address). Whereas for the RX status
3058 * buffers the cpu only reads the last cacheline of the RX descriptor
3059 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3060 */
3061static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3062 int src_idx, u32 dest_idx_unmasked)
3063{
3064 struct tg3_rx_buffer_desc *desc;
3065 struct ring_info *map, *src_map;
3066 struct sk_buff *skb;
3067 dma_addr_t mapping;
3068 int skb_size, dest_idx;
3069
3070 src_map = NULL;
3071 switch (opaque_key) {
3072 case RXD_OPAQUE_RING_STD:
3073 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3074 desc = &tp->rx_std[dest_idx];
3075 map = &tp->rx_std_buffers[dest_idx];
3076 if (src_idx >= 0)
3077 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07003078 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079 break;
3080
3081 case RXD_OPAQUE_RING_JUMBO:
3082 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3083 desc = &tp->rx_jumbo[dest_idx];
3084 map = &tp->rx_jumbo_buffers[dest_idx];
3085 if (src_idx >= 0)
3086 src_map = &tp->rx_jumbo_buffers[src_idx];
3087 skb_size = RX_JUMBO_PKT_BUF_SZ;
3088 break;
3089
3090 default:
3091 return -EINVAL;
3092	}
3093
3094 /* Do not overwrite any of the map or rp information
3095 * until we are sure we can commit to a new buffer.
3096 *
3097 * Callers depend upon this behavior and assume that
3098 * we leave everything unchanged if we fail.
3099 */
3100 skb = dev_alloc_skb(skb_size);
3101 if (skb == NULL)
3102 return -ENOMEM;
3103
3104 skb->dev = tp->dev;
3105 skb_reserve(skb, tp->rx_offset);
3106
3107 mapping = pci_map_single(tp->pdev, skb->data,
3108 skb_size - tp->rx_offset,
3109 PCI_DMA_FROMDEVICE);
3110
3111 map->skb = skb;
3112 pci_unmap_addr_set(map, mapping, mapping);
3113
3114 if (src_map != NULL)
3115 src_map->skb = NULL;
3116
3117 desc->addr_hi = ((u64)mapping >> 32);
3118 desc->addr_lo = ((u64)mapping & 0xffffffff);
3119
3120 return skb_size;
3121}
3122
3123/* We only need to move over in the address because the other
3124 * members of the RX descriptor are invariant. See notes above
3125 * tg3_alloc_rx_skb for full details.
3126 */
3127static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3128 int src_idx, u32 dest_idx_unmasked)
3129{
3130 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3131 struct ring_info *src_map, *dest_map;
3132 int dest_idx;
3133
3134 switch (opaque_key) {
3135 case RXD_OPAQUE_RING_STD:
3136 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3137 dest_desc = &tp->rx_std[dest_idx];
3138 dest_map = &tp->rx_std_buffers[dest_idx];
3139 src_desc = &tp->rx_std[src_idx];
3140 src_map = &tp->rx_std_buffers[src_idx];
3141 break;
3142
3143 case RXD_OPAQUE_RING_JUMBO:
3144 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3145 dest_desc = &tp->rx_jumbo[dest_idx];
3146 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3147 src_desc = &tp->rx_jumbo[src_idx];
3148 src_map = &tp->rx_jumbo_buffers[src_idx];
3149 break;
3150
3151 default:
3152 return;
3153	}
3154
3155 dest_map->skb = src_map->skb;
3156 pci_unmap_addr_set(dest_map, mapping,
3157 pci_unmap_addr(src_map, mapping));
3158 dest_desc->addr_hi = src_desc->addr_hi;
3159 dest_desc->addr_lo = src_desc->addr_lo;
3160
3161 src_map->skb = NULL;
3162}
3163
3164#if TG3_VLAN_TAG_USED
3165static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3166{
3167 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3168}
3169#endif
3170
3171/* The RX ring scheme is composed of multiple rings which post fresh
3172 * buffers to the chip, and one special ring the chip uses to report
3173 * status back to the host.
3174 *
3175 * The special ring reports the status of received packets to the
3176 * host. The chip does not write into the original descriptor the
3177 * RX buffer was obtained from. The chip simply takes the original
3178 * descriptor as provided by the host, updates the status and length
3179 * field, then writes this into the next status ring entry.
3180 *
3181 * Each ring the host uses to post buffers to the chip is described
3182 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3183 * it is first placed into the on-chip ram. When the packet's length
3184 * is known, it walks down the TG3_BDINFO entries to select the ring.
3185 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3186 * which is within the range of the new packet's length is chosen.
3187 *
3188 * The "separate ring for rx status" scheme may sound queer, but it makes
3189 * sense from a cache coherency perspective. If only the host writes
3190 * to the buffer post rings, and only the chip writes to the rx status
3191 * rings, then cache lines never move beyond shared-modified state.
3192 * If both the host and chip were to write into the same ring, cache line
3193 * eviction could occur since both entities want it in an exclusive state.
3194 */
3195static int tg3_rx(struct tg3 *tp, int budget)
3196{
Michael Chanf92905d2006-06-29 20:14:29 -07003197 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07003198 u32 sw_idx = tp->rx_rcb_ptr;
3199 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200 int received;
3201
3202 hw_idx = tp->hw_status->idx[0].rx_producer;
3203 /*
3204 * We need to order the read of hw_idx and the read of
3205 * the opaque cookie.
3206 */
3207 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 work_mask = 0;
3209 received = 0;
3210 while (sw_idx != hw_idx && budget > 0) {
3211 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3212 unsigned int len;
3213 struct sk_buff *skb;
3214 dma_addr_t dma_addr;
3215 u32 opaque_key, desc_idx, *post_ptr;
3216
3217 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3218 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3219 if (opaque_key == RXD_OPAQUE_RING_STD) {
3220 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3221 mapping);
3222 skb = tp->rx_std_buffers[desc_idx].skb;
3223 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07003224 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3226 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3227 mapping);
3228 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3229 post_ptr = &tp->rx_jumbo_ptr;
3230 }
3231 else {
3232 goto next_pkt_nopost;
3233 }
3234
3235 work_mask |= opaque_key;
3236
3237 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3238 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3239 drop_it:
3240 tg3_recycle_rx(tp, opaque_key,
3241 desc_idx, *post_ptr);
3242 drop_it_no_recycle:
3243 /* Other statistics kept track of by card. */
3244 tp->net_stats.rx_dropped++;
3245 goto next_pkt;
3246 }
3247
3248 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3249
3250 if (len > RX_COPY_THRESHOLD
3251 && tp->rx_offset == 2
3252 /* rx_offset != 2 iff this is a 5701 card running
3253 * in PCI-X mode [see tg3_get_invariants()] */
3254 ) {
3255 int skb_size;
3256
3257 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3258 desc_idx, *post_ptr);
3259 if (skb_size < 0)
3260 goto drop_it;
3261
3262 pci_unmap_single(tp->pdev, dma_addr,
3263 skb_size - tp->rx_offset,
3264 PCI_DMA_FROMDEVICE);
3265
3266 skb_put(skb, len);
3267 } else {
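			/* Small packet: copy it into a freshly allocated skb
			 * and recycle the original ring buffer in place.
			 */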
3268 struct sk_buff *copy_skb;
3269
3270 tg3_recycle_rx(tp, opaque_key,
3271 desc_idx, *post_ptr);
3272
3273 copy_skb = dev_alloc_skb(len + 2);
3274 if (copy_skb == NULL)
3275 goto drop_it_no_recycle;
3276
3277 copy_skb->dev = tp->dev;
3278 skb_reserve(copy_skb, 2);
3279 skb_put(copy_skb, len);
3280 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3281 memcpy(copy_skb->data, skb->data, len);
3282 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3283
3284 /* We'll reuse the original ring buffer. */
3285 skb = copy_skb;
3286 }
3287
3288 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3289 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3290 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3291 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3292 skb->ip_summed = CHECKSUM_UNNECESSARY;
3293 else
3294 skb->ip_summed = CHECKSUM_NONE;
3295
3296 skb->protocol = eth_type_trans(skb, tp->dev);
3297#if TG3_VLAN_TAG_USED
3298 if (tp->vlgrp != NULL &&
3299 desc->type_flags & RXD_FLAG_VLAN) {
3300 tg3_vlan_rx(tp, skb,
3301 desc->err_vlan & RXD_VLAN_MASK);
3302 } else
3303#endif
3304 netif_receive_skb(skb);
3305
3306 tp->dev->last_rx = jiffies;
3307 received++;
3308 budget--;
3309
3310next_pkt:
3311 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07003312
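		/* If many standard-ring buffers have been replenished, tell
		 * the chip now instead of waiting for the end of the poll pass.
		 */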
3313 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3314 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3315
3316 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3317 TG3_64BIT_REG_LOW, idx);
3318 work_mask &= ~RXD_OPAQUE_RING_STD;
3319 rx_std_posted = 0;
3320 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07003322 sw_idx++;
3323 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
Michael Chan52f6d692005-04-25 15:14:32 -07003324
3325 /* Refresh hw_idx to see if there is new work */
3326 if (sw_idx == hw_idx) {
3327 hw_idx = tp->hw_status->idx[0].rx_producer;
3328 rmb();
3329 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 }
3331
3332 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07003333 tp->rx_rcb_ptr = sw_idx;
3334 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335
3336 /* Refill RX ring(s). */
3337 if (work_mask & RXD_OPAQUE_RING_STD) {
3338 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3339 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3340 sw_idx);
3341 }
3342 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3343 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3344 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3345 sw_idx);
3346 }
3347 mmiowb();
3348
3349 return received;
3350}
3351
3352static int tg3_poll(struct net_device *netdev, int *budget)
3353{
3354 struct tg3 *tp = netdev_priv(netdev);
3355 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356 int done;
3357
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358 /* handle link change and other phy events */
3359 if (!(tp->tg3_flags &
3360 (TG3_FLAG_USE_LINKCHG_REG |
3361 TG3_FLAG_POLL_SERDES))) {
3362 if (sblk->status & SD_STATUS_LINK_CHG) {
3363 sblk->status = SD_STATUS_UPDATED |
3364 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07003365 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07003367 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 }
3369 }
3370
3371 /* run TX completion thread */
3372 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 tg3_tx(tp);
Michael Chandf3e6542006-05-26 17:48:07 -07003374 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3375 netif_rx_complete(netdev);
3376 schedule_work(&tp->reset_task);
3377 return 0;
3378 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 }
3380
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 /* run RX thread, within the bounds set by NAPI.
3382 * All RX "locking" is done by ensuring outside
3383 * code synchronizes with dev->poll()
3384 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3386 int orig_budget = *budget;
3387 int work_done;
3388
3389 if (orig_budget > netdev->quota)
3390 orig_budget = netdev->quota;
3391
3392 work_done = tg3_rx(tp, orig_budget);
3393
3394 *budget -= work_done;
3395 netdev->quota -= work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 }
3397
Michael Chan38f38432005-09-05 17:53:32 -07003398 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
David S. Millerf7383c22005-05-18 22:50:53 -07003399 tp->last_tag = sblk->status_tag;
Michael Chan38f38432005-09-05 17:53:32 -07003400 rmb();
3401 } else
3402 sblk->status &= ~SD_STATUS_UPDATED;
David S. Millerf7383c22005-05-18 22:50:53 -07003403
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 /* if no more work, tell net stack and NIC we're done */
David S. Millerf7383c22005-05-18 22:50:53 -07003405 done = !tg3_has_work(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 if (done) {
David S. Millerf47c11e2005-06-24 20:18:35 -07003407 netif_rx_complete(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 tg3_restart_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 }
3410
3411 return (done ? 0 : 1);
3412}
3413
David S. Millerf47c11e2005-06-24 20:18:35 -07003414static void tg3_irq_quiesce(struct tg3 *tp)
3415{
3416 BUG_ON(tp->irq_sync);
3417
3418 tp->irq_sync = 1;
3419 smp_mb();
3420
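	/* The barrier above makes irq_sync visible before we wait out any
	 * handler that is already running.
	 */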
3421 synchronize_irq(tp->pdev->irq);
3422}
3423
3424static inline int tg3_irq_sync(struct tg3 *tp)
3425{
3426 return tp->irq_sync;
3427}
3428
3429/* Fully shut down all tg3 driver activity elsewhere in the system.
3430 * If irq_sync is non-zero, then we also synchronize with the IRQ
3431 * handler.  Most of the time this is not necessary, except when
3432 * shutting down the device.
3433 */
3434static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3435{
3436 if (irq_sync)
3437 tg3_irq_quiesce(tp);
3438 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07003439}
3440
3441static inline void tg3_full_unlock(struct tg3 *tp)
3442{
David S. Millerf47c11e2005-06-24 20:18:35 -07003443 spin_unlock_bh(&tp->lock);
3444}
3445
Michael Chanfcfa0a32006-03-20 22:28:41 -08003446/* One-shot MSI handler - Chip automatically disables interrupt
3447 * after sending MSI so driver doesn't have to do it.
3448 */
3449static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3450{
3451 struct net_device *dev = dev_id;
3452 struct tg3 *tp = netdev_priv(dev);
3453
3454 prefetch(tp->hw_status);
3455 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3456
3457 if (likely(!tg3_irq_sync(tp)))
3458 netif_rx_schedule(dev); /* schedule NAPI poll */
3459
3460 return IRQ_HANDLED;
3461}
3462
Michael Chan88b06bc2005-04-21 17:13:25 -07003463/* MSI ISR - No need to check for interrupt sharing and no need to
3464 * flush status block and interrupt mailbox. PCI ordering rules
3465 * guarantee that MSI will arrive after the status block.
3466 */
3467static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3468{
3469 struct net_device *dev = dev_id;
3470 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07003471
Michael Chan61487482005-09-05 17:53:19 -07003472 prefetch(tp->hw_status);
3473 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07003474 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003475 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07003476 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07003477 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07003478 * NIC to stop sending us irqs, engaging "in-intr-handler"
3479 * event coalescing.
3480 */
3481 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07003482 if (likely(!tg3_irq_sync(tp)))
Michael Chan88b06bc2005-04-21 17:13:25 -07003483 netif_rx_schedule(dev); /* schedule NAPI poll */
Michael Chan61487482005-09-05 17:53:19 -07003484
Michael Chan88b06bc2005-04-21 17:13:25 -07003485 return IRQ_RETVAL(1);
3486}
3487
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3489{
3490 struct net_device *dev = dev_id;
3491 struct tg3 *tp = netdev_priv(dev);
3492 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 unsigned int handled = 1;
3494
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 /* In INTx mode, it is possible for the interrupt to arrive at
3496 * the CPU before the status block write that was posted prior to it.
3497 * Reading the PCI State register will confirm whether the
3498 * interrupt is ours and will flush the status block.
3499 */
3500 if ((sblk->status & SD_STATUS_UPDATED) ||
3501 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3502 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003503 * Writing any value to intr-mbox-0 clears PCI INTA# and
3504 * chip-internal interrupt pending events.
3505 * Writing non-zero to intr-mbox-0 additionally tells the
3506 * NIC to stop sending us irqs, engaging "in-intr-handler"
3507 * event coalescing.
3508 */
3509 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3510 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003511 if (tg3_irq_sync(tp))
3512 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07003513 sblk->status &= ~SD_STATUS_UPDATED;
Michael Chan61487482005-09-05 17:53:19 -07003514 if (likely(tg3_has_work(tp))) {
3515 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
David S. Millerfac9b832005-05-18 22:46:34 -07003516 netif_rx_schedule(dev); /* schedule NAPI poll */
Michael Chan61487482005-09-05 17:53:19 -07003517 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07003518 /* No work, shared interrupt perhaps? re-enable
3519 * interrupts, and flush that PCI write
3520 */
Michael Chan09ee9292005-08-09 20:17:00 -07003521 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
David S. Millerfac9b832005-05-18 22:46:34 -07003522 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07003523 }
3524 } else { /* shared interrupt */
3525 handled = 0;
3526 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003527out:
David S. Millerfac9b832005-05-18 22:46:34 -07003528 return IRQ_RETVAL(handled);
3529}
3530
3531static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3532{
3533 struct net_device *dev = dev_id;
3534 struct tg3 *tp = netdev_priv(dev);
3535 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07003536 unsigned int handled = 1;
3537
David S. Millerfac9b832005-05-18 22:46:34 -07003538 /* In INTx mode, it is possible for the interrupt to arrive at
3539 * the CPU before the status block write that was posted prior to it.
3540 * Reading the PCI State register will confirm whether the
3541 * interrupt is ours and will flush the status block.
3542 */
Michael Chan38f38432005-09-05 17:53:32 -07003543 if ((sblk->status_tag != tp->last_tag) ||
David S. Millerfac9b832005-05-18 22:46:34 -07003544 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3545 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 * writing any value to intr-mbox-0 clears PCI INTA# and
3547 * chip-internal interrupt pending events.
3548 * writing non-zero to intr-mbox-0 additionally tells the
3549 * NIC to stop sending us irqs, engaging "in-intr-handler"
3550 * event coalescing.
3551 */
3552 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3553 0x00000001);
David S. Millerf47c11e2005-06-24 20:18:35 -07003554 if (tg3_irq_sync(tp))
3555 goto out;
Michael Chan38f38432005-09-05 17:53:32 -07003556 if (netif_rx_schedule_prep(dev)) {
Michael Chan61487482005-09-05 17:53:19 -07003557 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan38f38432005-09-05 17:53:32 -07003558 /* Update last_tag to mark that this status has been
3559 * seen. Because interrupt may be shared, we may be
3560 * racing with tg3_poll(), so only update last_tag
3561 * if tg3_poll() is not scheduled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 */
Michael Chan38f38432005-09-05 17:53:32 -07003563 tp->last_tag = sblk->status_tag;
3564 __netif_rx_schedule(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565 }
3566 } else { /* shared interrupt */
3567 handled = 0;
3568 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003569out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570 return IRQ_RETVAL(handled);
3571}
3572
Michael Chan79381092005-04-21 17:13:59 -07003573/* ISR for interrupt test */
3574static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3575 struct pt_regs *regs)
3576{
3577 struct net_device *dev = dev_id;
3578 struct tg3 *tp = netdev_priv(dev);
3579 struct tg3_hw_status *sblk = tp->hw_status;
3580
Michael Chanf9804dd2005-09-27 12:13:10 -07003581 if ((sblk->status & SD_STATUS_UPDATED) ||
3582 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chan79381092005-04-21 17:13:59 -07003583 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3584 0x00000001);
3585 return IRQ_RETVAL(1);
3586 }
3587 return IRQ_RETVAL(0);
3588}
3589
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07003590static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07003591static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
3593#ifdef CONFIG_NET_POLL_CONTROLLER
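/* Netpoll hook: fake an interrupt so the driver can make progress while
 * normal interrupt delivery is disabled (e.g. netconsole).
 */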
3594static void tg3_poll_controller(struct net_device *dev)
3595{
Michael Chan88b06bc2005-04-21 17:13:25 -07003596 struct tg3 *tp = netdev_priv(dev);
3597
3598 tg3_interrupt(tp->pdev->irq, dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599}
3600#endif
3601
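/* Workqueue handler that resets and re-initializes the chip.  It runs in
 * process context and is scheduled by tg3_tx_timeout() and by tg3_poll()
 * when a TX recovery is pending.
 */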
3602static void tg3_reset_task(void *_data)
3603{
3604 struct tg3 *tp = _data;
3605 unsigned int restart_timer;
3606
Michael Chan7faa0062006-02-02 17:29:28 -08003607 tg3_full_lock(tp, 0);
3608 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3609
3610 if (!netif_running(tp->dev)) {
3611 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3612 tg3_full_unlock(tp);
3613 return;
3614 }
3615
3616 tg3_full_unlock(tp);
3617
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 tg3_netif_stop(tp);
3619
David S. Millerf47c11e2005-06-24 20:18:35 -07003620 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621
3622 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3623 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3624
Michael Chandf3e6542006-05-26 17:48:07 -07003625 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3626 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3627 tp->write32_rx_mbox = tg3_write_flush_reg32;
3628 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3629 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3630 }
3631
Michael Chan944d9802005-05-29 14:57:48 -07003632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07003633 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634
3635 tg3_netif_start(tp);
3636
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 if (restart_timer)
3638 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08003639
3640 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3641
3642 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643}
3644
3645static void tg3_tx_timeout(struct net_device *dev)
3646{
3647 struct tg3 *tp = netdev_priv(dev);
3648
3649 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3650 dev->name);
3651
3652 schedule_work(&tp->reset_task);
3653}
3654
Michael Chanc58ec932005-09-17 00:46:27 -07003655/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3656static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3657{
3658 u32 base = (u32) mapping & 0xffffffff;
3659
3660 return ((base > 0xffffdcc0) &&
3661 (base + len + 8 < base));
3662}
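
/* Example (illustrative): with base = 0xffffe000 and len = 0x3000,
 * base + len + 8 wraps past 32 bits, so the buffer straddles a 4GB
 * boundary and is bounced through tigon3_dma_hwbug_workaround() below.
 */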
3663
Michael Chan72f2afb2006-03-06 19:28:35 -08003664/* Test for DMA addresses > 40-bit */
3665static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3666 int len)
3667{
3668#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08003669 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08003670 return (((u64) mapping + len) > DMA_40BIT_MASK);
3671 return 0;
3672#else
3673 return 0;
3674#endif
3675}
3676
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3678
Michael Chan72f2afb2006-03-06 19:28:35 -08003679/* Work around the 4GB and 40-bit hardware DMA bugs. */
3680static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07003681 u32 last_plus_one, u32 *start,
3682 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683{
3684 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
Michael Chanc58ec932005-09-17 00:46:27 -07003685 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07003687 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688
3689 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07003690 ret = -1;
3691 } else {
3692 /* New SKB is guaranteed to be linear. */
3693 entry = *start;
3694 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3695 PCI_DMA_TODEVICE);
3696 /* Make sure new skb does not cross any 4G boundaries.
3697 * Drop the packet if it does.
3698 */
3699 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3700 ret = -1;
3701 dev_kfree_skb(new_skb);
3702 new_skb = NULL;
3703 } else {
3704 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3705 base_flags, 1 | (mss << 1));
3706 *start = NEXT_TX(entry);
3707 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 }
3709
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 /* Now clean up the sw ring entries. */
3711 i = 0;
3712 while (entry != last_plus_one) {
3713 int len;
3714
3715 if (i == 0)
3716 len = skb_headlen(skb);
3717 else
3718 len = skb_shinfo(skb)->frags[i-1].size;
3719 pci_unmap_single(tp->pdev,
3720 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3721 len, PCI_DMA_TODEVICE);
3722 if (i == 0) {
3723 tp->tx_buffers[entry].skb = new_skb;
3724 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3725 } else {
3726 tp->tx_buffers[entry].skb = NULL;
3727 }
3728 entry = NEXT_TX(entry);
3729 i++;
3730 }
3731
3732 dev_kfree_skb(skb);
3733
Michael Chanc58ec932005-09-17 00:46:27 -07003734 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735}
3736
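/* Fill one hardware TX descriptor: the 64-bit DMA address, the
 * length/flags word and the VLAN-tag/MSS word.  mss_and_is_end packs
 * the "last fragment" bit in bit 0 and the MSS in the upper bits.
 */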
3737static void tg3_set_txd(struct tg3 *tp, int entry,
3738 dma_addr_t mapping, int len, u32 flags,
3739 u32 mss_and_is_end)
3740{
3741 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3742 int is_end = (mss_and_is_end & 0x1);
3743 u32 mss = (mss_and_is_end >> 1);
3744 u32 vlan_tag = 0;
3745
3746 if (is_end)
3747 flags |= TXD_FLAG_END;
3748 if (flags & TXD_FLAG_VLAN) {
3749 vlan_tag = flags >> 16;
3750 flags &= 0xffff;
3751 }
3752 vlan_tag |= (mss << TXD_MSS_SHIFT);
3753
3754 txd->addr_hi = ((u64) mapping >> 32);
3755 txd->addr_lo = ((u64) mapping & 0xffffffff);
3756 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3757 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3758}
3759
Michael Chan5a6f3072006-03-20 22:28:05 -08003760/* hard_start_xmit for devices that don't have any bugs and
3761 * support TG3_FLG2_HW_TSO_2 only.
3762 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3764{
3765 struct tg3 *tp = netdev_priv(dev);
3766 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767 u32 len, entry, base_flags, mss;
Michael Chan5a6f3072006-03-20 22:28:05 -08003768
3769 len = skb_headlen(skb);
3770
Michael Chan00b70502006-06-17 21:58:45 -07003771 /* We are running in BH disabled context with netif_tx_lock
3772 * and TX reclaim runs via tp->poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08003773 * interrupt. Furthermore, IRQ processing runs lockless so we have
3774 * no IRQ context deadlocks to worry about either. Rejoice!
3775 */
Michael Chan5a6f3072006-03-20 22:28:05 -08003776 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3777 if (!netif_queue_stopped(dev)) {
3778 netif_stop_queue(dev);
3779
3780 /* This is a hard error, log it. */
3781 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3782 "queue awake!\n", dev->name);
3783 }
Michael Chan5a6f3072006-03-20 22:28:05 -08003784 return NETDEV_TX_BUSY;
3785 }
3786
3787 entry = tp->tx_prod;
3788 base_flags = 0;
3789#if TG3_TSO_SUPPORT != 0
3790 mss = 0;
3791 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
Herbert Xu79671682006-06-22 02:40:14 -07003792 (mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08003793 int tcp_opt_len, ip_tcp_len;
3794
3795 if (skb_header_cloned(skb) &&
3796 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3797 dev_kfree_skb(skb);
3798 goto out_unlock;
3799 }
3800
3801 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3802 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3803
3804 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3805 TXD_FLAG_CPU_POST_DMA);
3806
3807 skb->nh.iph->check = 0;
3808 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3809
3810 skb->h.th->check = 0;
3811
3812 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3813 }
3814 else if (skb->ip_summed == CHECKSUM_HW)
3815 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3816#else
3817 mss = 0;
3818 if (skb->ip_summed == CHECKSUM_HW)
3819 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3820#endif
3821#if TG3_VLAN_TAG_USED
3822 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3823 base_flags |= (TXD_FLAG_VLAN |
3824 (vlan_tx_tag_get(skb) << 16));
3825#endif
3826
3827 /* Queue skb data, a.k.a. the main skb fragment. */
3828 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3829
3830 tp->tx_buffers[entry].skb = skb;
3831 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3832
3833 tg3_set_txd(tp, entry, mapping, len, base_flags,
3834 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3835
3836 entry = NEXT_TX(entry);
3837
3838 /* Now loop through additional data fragments, and queue them. */
3839 if (skb_shinfo(skb)->nr_frags > 0) {
3840 unsigned int i, last;
3841
3842 last = skb_shinfo(skb)->nr_frags - 1;
3843 for (i = 0; i <= last; i++) {
3844 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3845
3846 len = frag->size;
3847 mapping = pci_map_page(tp->pdev,
3848 frag->page,
3849 frag->page_offset,
3850 len, PCI_DMA_TODEVICE);
3851
3852 tp->tx_buffers[entry].skb = NULL;
3853 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3854
3855 tg3_set_txd(tp, entry, mapping, len,
3856 base_flags, (i == last) | (mss << 1));
3857
3858 entry = NEXT_TX(entry);
3859 }
3860 }
3861
3862 /* Packets are ready, update Tx producer idx local and on card. */
3863 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3864
3865 tp->tx_prod = entry;
Michael Chan00b70502006-06-17 21:58:45 -07003866 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
3867 spin_lock(&tp->tx_lock);
Michael Chan5a6f3072006-03-20 22:28:05 -08003868 netif_stop_queue(dev);
3869 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3870 netif_wake_queue(tp->dev);
Michael Chan00b70502006-06-17 21:58:45 -07003871 spin_unlock(&tp->tx_lock);
Michael Chan5a6f3072006-03-20 22:28:05 -08003872 }
3873
3874out_unlock:
3875 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08003876
3877 dev->trans_start = jiffies;
3878
3879 return NETDEV_TX_OK;
3880}
3881
Michael Chan52c0fd82006-06-29 20:15:54 -07003882#if TG3_TSO_SUPPORT != 0
3883static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3884
3885/* Use GSO to work around a rare TSO bug that may be triggered when the
3886 * TSO header is greater than 80 bytes.
3887 */
3888static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3889{
3890 struct sk_buff *segs, *nskb;
3891
3892 /* Estimate the number of fragments in the worst case */
3893 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3894 netif_stop_queue(tp->dev);
3895 return NETDEV_TX_BUSY;
3896 }
3897
3898 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3899 if (unlikely(IS_ERR(segs)))
3900 goto tg3_tso_bug_end;
3901
3902 do {
3903 nskb = segs;
3904 segs = segs->next;
3905 nskb->next = NULL;
3906 tg3_start_xmit_dma_bug(nskb, tp->dev);
3907 } while (segs);
3908
3909tg3_tso_bug_end:
3910 dev_kfree_skb(skb);
3911
3912 return NETDEV_TX_OK;
3913}
3914#endif
3915
Michael Chan5a6f3072006-03-20 22:28:05 -08003916/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3917 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3918 */
3919static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3920{
3921 struct tg3 *tp = netdev_priv(dev);
3922 dma_addr_t mapping;
3923 u32 len, entry, base_flags, mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 int would_hit_hwbug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925
3926 len = skb_headlen(skb);
3927
Michael Chan00b70502006-06-17 21:58:45 -07003928 /* We are running in BH disabled context with netif_tx_lock
3929 * and TX reclaim runs via tp->poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07003930 * interrupt. Furthermore, IRQ processing runs lockless so we have
3931 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08003934 if (!netif_queue_stopped(dev)) {
3935 netif_stop_queue(dev);
3936
3937 /* This is a hard error, log it. */
3938 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3939 "queue awake!\n", dev->name);
3940 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 return NETDEV_TX_BUSY;
3942 }
3943
3944 entry = tp->tx_prod;
3945 base_flags = 0;
3946 if (skb->ip_summed == CHECKSUM_HW)
3947 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3948#if TG3_TSO_SUPPORT != 0
3949 mss = 0;
3950 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
Herbert Xu79671682006-06-22 02:40:14 -07003951 (mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan52c0fd82006-06-29 20:15:54 -07003952 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953
3954 if (skb_header_cloned(skb) &&
3955 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3956 dev_kfree_skb(skb);
3957 goto out_unlock;
3958 }
3959
3960 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3961 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3962
Michael Chan52c0fd82006-06-29 20:15:54 -07003963 hdr_len = ip_tcp_len + tcp_opt_len;
3964 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3965 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3966 return (tg3_tso_bug(tp, skb));
3967
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3969 TXD_FLAG_CPU_POST_DMA);
3970
3971 skb->nh.iph->check = 0;
Michael Chan52c0fd82006-06-29 20:15:54 -07003972 skb->nh.iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3974 skb->h.th->check = 0;
3975 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3976 }
3977 else {
3978 skb->h.th->check =
3979 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3980 skb->nh.iph->daddr,
3981 0, IPPROTO_TCP, 0);
3982 }
3983
3984 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3985 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3986 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3987 int tsflags;
3988
3989 tsflags = ((skb->nh.iph->ihl - 5) +
3990 (tcp_opt_len >> 2));
3991 mss |= (tsflags << 11);
3992 }
3993 } else {
3994 if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3995 int tsflags;
3996
3997 tsflags = ((skb->nh.iph->ihl - 5) +
3998 (tcp_opt_len >> 2));
3999 base_flags |= tsflags << 12;
4000 }
4001 }
4002 }
4003#else
4004 mss = 0;
4005#endif
4006#if TG3_VLAN_TAG_USED
4007 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4008 base_flags |= (TXD_FLAG_VLAN |
4009 (vlan_tx_tag_get(skb) << 16));
4010#endif
4011
4012 /* Queue skb data, a.k.a. the main skb fragment. */
4013 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4014
4015 tp->tx_buffers[entry].skb = skb;
4016 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4017
4018 would_hit_hwbug = 0;
4019
4020 if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07004021 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022
4023 tg3_set_txd(tp, entry, mapping, len, base_flags,
4024 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4025
4026 entry = NEXT_TX(entry);
4027
4028 /* Now loop through additional data fragments, and queue them. */
4029 if (skb_shinfo(skb)->nr_frags > 0) {
4030 unsigned int i, last;
4031
4032 last = skb_shinfo(skb)->nr_frags - 1;
4033 for (i = 0; i <= last; i++) {
4034 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4035
4036 len = frag->size;
4037 mapping = pci_map_page(tp->pdev,
4038 frag->page,
4039 frag->page_offset,
4040 len, PCI_DMA_TODEVICE);
4041
4042 tp->tx_buffers[entry].skb = NULL;
4043 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4044
Michael Chanc58ec932005-09-17 00:46:27 -07004045 if (tg3_4g_overflow_test(mapping, len))
4046 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047
Michael Chan72f2afb2006-03-06 19:28:35 -08004048 if (tg3_40bit_overflow_test(tp, mapping, len))
4049 would_hit_hwbug = 1;
4050
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4052 tg3_set_txd(tp, entry, mapping, len,
4053 base_flags, (i == last)|(mss << 1));
4054 else
4055 tg3_set_txd(tp, entry, mapping, len,
4056 base_flags, (i == last));
4057
4058 entry = NEXT_TX(entry);
4059 }
4060 }
4061
4062 if (would_hit_hwbug) {
4063 u32 last_plus_one = entry;
4064 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065
Michael Chanc58ec932005-09-17 00:46:27 -07004066 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4067 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068
4069 /* If the workaround fails due to memory/mapping
4070 * failure, silently drop this packet.
4071 */
Michael Chan72f2afb2006-03-06 19:28:35 -08004072 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07004073 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 goto out_unlock;
4075
4076 entry = start;
4077 }
4078
4079 /* Packets are ready, update Tx producer idx local and on card. */
4080 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4081
4082 tp->tx_prod = entry;
Michael Chan00b70502006-06-17 21:58:45 -07004083 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
4084 spin_lock(&tp->tx_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085 netif_stop_queue(dev);
Michael Chan51b91462005-09-01 17:41:28 -07004086 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
4087 netif_wake_queue(tp->dev);
Michael Chan00b70502006-06-17 21:58:45 -07004088 spin_unlock(&tp->tx_lock);
Michael Chan51b91462005-09-01 17:41:28 -07004089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090
4091out_unlock:
4092 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093
4094 dev->trans_start = jiffies;
4095
4096 return NETDEV_TX_OK;
4097}
4098
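/* Record the new MTU and update the jumbo RX ring and TSO capability
 * flags to match; 5780-class chips give up TSO when running jumbo frames.
 */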
4099static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4100 int new_mtu)
4101{
4102 dev->mtu = new_mtu;
4103
Michael Chanef7f5ec2005-07-25 12:32:25 -07004104 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07004105 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07004106 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4107 ethtool_op_set_tso(dev, 0);
4108 }
4109 else
4110 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4111 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07004112 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07004113 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07004114 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07004115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116}
4117
4118static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4119{
4120 struct tg3 *tp = netdev_priv(dev);
4121
4122 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4123 return -EINVAL;
4124
4125 if (!netif_running(dev)) {
4126 /* We'll just catch it later when the
4127 * device is up'd.
4128 */
4129 tg3_set_mtu(dev, tp, new_mtu);
4130 return 0;
4131 }
4132
4133 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004134
4135 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136
Michael Chan944d9802005-05-29 14:57:48 -07004137 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138
4139 tg3_set_mtu(dev, tp, new_mtu);
4140
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004141 tg3_init_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142
4143 tg3_netif_start(tp);
4144
David S. Millerf47c11e2005-06-24 20:18:35 -07004145 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146
4147 return 0;
4148}
4149
4150/* Free up pending packets in all rx/tx rings.
4151 *
4152 * The chip has been shut down and the driver detached from
4153 * the networking stack, so no interrupts or new tx packets will
4154 * end up in the driver. tp->{tx,}lock is not held and we are not
4155 * in an interrupt context and thus may sleep.
4156 */
4157static void tg3_free_rings(struct tg3 *tp)
4158{
4159 struct ring_info *rxp;
4160 int i;
4161
4162 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4163 rxp = &tp->rx_std_buffers[i];
4164
4165 if (rxp->skb == NULL)
4166 continue;
4167 pci_unmap_single(tp->pdev,
4168 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07004169 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 PCI_DMA_FROMDEVICE);
4171 dev_kfree_skb_any(rxp->skb);
4172 rxp->skb = NULL;
4173 }
4174
4175 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4176 rxp = &tp->rx_jumbo_buffers[i];
4177
4178 if (rxp->skb == NULL)
4179 continue;
4180 pci_unmap_single(tp->pdev,
4181 pci_unmap_addr(rxp, mapping),
4182 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4183 PCI_DMA_FROMDEVICE);
4184 dev_kfree_skb_any(rxp->skb);
4185 rxp->skb = NULL;
4186 }
4187
4188 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4189 struct tx_ring_info *txp;
4190 struct sk_buff *skb;
4191 int j;
4192
4193 txp = &tp->tx_buffers[i];
4194 skb = txp->skb;
4195
4196 if (skb == NULL) {
4197 i++;
4198 continue;
4199 }
4200
4201 pci_unmap_single(tp->pdev,
4202 pci_unmap_addr(txp, mapping),
4203 skb_headlen(skb),
4204 PCI_DMA_TODEVICE);
4205 txp->skb = NULL;
4206
4207 i++;
4208
4209 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4210 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4211 pci_unmap_page(tp->pdev,
4212 pci_unmap_addr(txp, mapping),
4213 skb_shinfo(skb)->frags[j].size,
4214 PCI_DMA_TODEVICE);
4215 i++;
4216 }
4217
4218 dev_kfree_skb_any(skb);
4219 }
4220}
4221
4222/* Initialize tx/rx rings for packet processing.
4223 *
4224 * The chip has been shut down and the driver detached from
4225 * the networking stack, so no interrupts or new tx packets will
4226 * end up in the driver. tp->{tx,}lock are held and thus
4227 * we may not sleep.
4228 */
4229static void tg3_init_rings(struct tg3 *tp)
4230{
4231 u32 i;
4232
4233 /* Free up all the SKBs. */
4234 tg3_free_rings(tp);
4235
4236 /* Zero out all descriptors. */
4237 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4238 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4239 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4240 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4241
Michael Chan7e72aad2005-07-25 12:31:17 -07004242 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07004243 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07004244 (tp->dev->mtu > ETH_DATA_LEN))
4245 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4246
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 /* Initialize invariants of the rings, we only set this
4248 * stuff once. This works because the card does not
4249 * write into the rx buffer posting rings.
4250 */
4251 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4252 struct tg3_rx_buffer_desc *rxd;
4253
4254 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07004255 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 << RXD_LEN_SHIFT;
4257 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4258 rxd->opaque = (RXD_OPAQUE_RING_STD |
4259 (i << RXD_OPAQUE_INDEX_SHIFT));
4260 }
4261
Michael Chan0f893dc2005-07-25 12:30:38 -07004262 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4264 struct tg3_rx_buffer_desc *rxd;
4265
4266 rxd = &tp->rx_jumbo[i];
4267 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4268 << RXD_LEN_SHIFT;
4269 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4270 RXD_FLAG_JUMBO;
4271 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4272 (i << RXD_OPAQUE_INDEX_SHIFT));
4273 }
4274 }
4275
4276 /* Now allocate fresh SKBs for each rx ring. */
4277 for (i = 0; i < tp->rx_pending; i++) {
4278 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4279 -1, i) < 0)
4280 break;
4281 }
4282
Michael Chan0f893dc2005-07-25 12:30:38 -07004283 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4285 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4286 -1, i) < 0)
4287 break;
4288 }
4289 }
4290}
4291
4292/*
4293 * Must not be invoked with interrupt sources disabled and
4294 * the hardware shut down.
4295 */
4296static void tg3_free_consistent(struct tg3 *tp)
4297{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04004298 kfree(tp->rx_std_buffers);
4299 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 if (tp->rx_std) {
4301 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4302 tp->rx_std, tp->rx_std_mapping);
4303 tp->rx_std = NULL;
4304 }
4305 if (tp->rx_jumbo) {
4306 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4307 tp->rx_jumbo, tp->rx_jumbo_mapping);
4308 tp->rx_jumbo = NULL;
4309 }
4310 if (tp->rx_rcb) {
4311 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4312 tp->rx_rcb, tp->rx_rcb_mapping);
4313 tp->rx_rcb = NULL;
4314 }
4315 if (tp->tx_ring) {
4316 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4317 tp->tx_ring, tp->tx_desc_mapping);
4318 tp->tx_ring = NULL;
4319 }
4320 if (tp->hw_status) {
4321 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4322 tp->hw_status, tp->status_mapping);
4323 tp->hw_status = NULL;
4324 }
4325 if (tp->hw_stats) {
4326 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4327 tp->hw_stats, tp->stats_mapping);
4328 tp->hw_stats = NULL;
4329 }
4330}
4331
4332/*
4333 * Must not be invoked with interrupt sources disabled and
4334 * the hardware shut down. Can sleep.
4335 */
4336static int tg3_alloc_consistent(struct tg3 *tp)
4337{
4338 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4339 (TG3_RX_RING_SIZE +
4340 TG3_RX_JUMBO_RING_SIZE)) +
4341 (sizeof(struct tx_ring_info) *
4342 TG3_TX_RING_SIZE),
4343 GFP_KERNEL);
4344 if (!tp->rx_std_buffers)
4345 return -ENOMEM;
4346
4347 memset(tp->rx_std_buffers, 0,
4348 (sizeof(struct ring_info) *
4349 (TG3_RX_RING_SIZE +
4350 TG3_RX_JUMBO_RING_SIZE)) +
4351 (sizeof(struct tx_ring_info) *
4352 TG3_TX_RING_SIZE));
4353
4354 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4355 tp->tx_buffers = (struct tx_ring_info *)
4356 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4357
4358 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4359 &tp->rx_std_mapping);
4360 if (!tp->rx_std)
4361 goto err_out;
4362
4363 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4364 &tp->rx_jumbo_mapping);
4365
4366 if (!tp->rx_jumbo)
4367 goto err_out;
4368
4369 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4370 &tp->rx_rcb_mapping);
4371 if (!tp->rx_rcb)
4372 goto err_out;
4373
4374 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4375 &tp->tx_desc_mapping);
4376 if (!tp->tx_ring)
4377 goto err_out;
4378
4379 tp->hw_status = pci_alloc_consistent(tp->pdev,
4380 TG3_HW_STATUS_SIZE,
4381 &tp->status_mapping);
4382 if (!tp->hw_status)
4383 goto err_out;
4384
4385 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4386 sizeof(struct tg3_hw_stats),
4387 &tp->stats_mapping);
4388 if (!tp->hw_stats)
4389 goto err_out;
4390
4391 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4392 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4393
4394 return 0;
4395
4396err_out:
4397 tg3_free_consistent(tp);
4398 return -ENOMEM;
4399}
4400
4401#define MAX_WAIT_CNT 1000
4402
4403/* To stop a block, clear the enable bit and poll till it
4404 * clears. tp->lock is held.
4405 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004406static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407{
4408 unsigned int i;
4409 u32 val;
4410
4411 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4412 switch (ofs) {
4413 case RCVLSC_MODE:
4414 case DMAC_MODE:
4415 case MBFREE_MODE:
4416 case BUFMGR_MODE:
4417 case MEMARB_MODE:
4418 /* We can't enable/disable these bits of the
4419 * 5705/5750, just say success.
4420 */
4421 return 0;
4422
4423 default:
4424 break;
4425 };
4426 }
4427
4428 val = tr32(ofs);
4429 val &= ~enable_bit;
4430 tw32_f(ofs, val);
4431
4432 for (i = 0; i < MAX_WAIT_CNT; i++) {
4433 udelay(100);
4434 val = tr32(ofs);
4435 if ((val & enable_bit) == 0)
4436 break;
4437 }
4438
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004439 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4441 "ofs=%lx enable_bit=%x\n",
4442 ofs, enable_bit);
4443 return -ENODEV;
4444 }
4445
4446 return 0;
4447}
4448
4449/* tp->lock is held. */
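/* Disable interrupts and walk every RX/TX DMA and MAC engine, clearing
 * each enable bit via tg3_stop_block().  Individual errors are ORed
 * together so one stuck block does not abort the rest of the shutdown.
 */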
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004450static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451{
4452 int i, err;
4453
4454 tg3_disable_ints(tp);
4455
4456 tp->rx_mode &= ~RX_MODE_ENABLE;
4457 tw32_f(MAC_RX_MODE, tp->rx_mode);
4458 udelay(10);
4459
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004460 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4461 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4462 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4463 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4464 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4465 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004467 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4468 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4469 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4470 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4471 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4472 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4473 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474
4475 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4476 tw32_f(MAC_MODE, tp->mac_mode);
4477 udelay(40);
4478
4479 tp->tx_mode &= ~TX_MODE_ENABLE;
4480 tw32_f(MAC_TX_MODE, tp->tx_mode);
4481
4482 for (i = 0; i < MAX_WAIT_CNT; i++) {
4483 udelay(100);
4484 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4485 break;
4486 }
4487 if (i >= MAX_WAIT_CNT) {
4488 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4489 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4490 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07004491 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492 }
4493
Michael Chane6de8ad2005-05-05 14:42:41 -07004494 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004495 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4496 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497
4498 tw32(FTQ_RESET, 0xffffffff);
4499 tw32(FTQ_RESET, 0x00000000);
4500
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004501 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4502 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503
4504 if (tp->hw_status)
4505 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4506 if (tp->hw_stats)
4507 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4508
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 return err;
4510}
4511
4512/* tp->lock is held. */
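/* Acquire the NVRAM software arbitration grant from the hardware.
 * Nested requests are counted in tp->nvram_lock_cnt so that lock and
 * unlock calls can be paired.
 */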
4513static int tg3_nvram_lock(struct tg3 *tp)
4514{
4515 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4516 int i;
4517
Michael Chanec41c7d2006-01-17 02:40:55 -08004518 if (tp->nvram_lock_cnt == 0) {
4519 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4520 for (i = 0; i < 8000; i++) {
4521 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4522 break;
4523 udelay(20);
4524 }
4525 if (i == 8000) {
4526 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4527 return -ENODEV;
4528 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529 }
Michael Chanec41c7d2006-01-17 02:40:55 -08004530 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531 }
4532 return 0;
4533}
4534
4535/* tp->lock is held. */
4536static void tg3_nvram_unlock(struct tg3 *tp)
4537{
Michael Chanec41c7d2006-01-17 02:40:55 -08004538 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4539 if (tp->nvram_lock_cnt > 0)
4540 tp->nvram_lock_cnt--;
4541 if (tp->nvram_lock_cnt == 0)
4542 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4543 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544}
4545
4546/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07004547static void tg3_enable_nvram_access(struct tg3 *tp)
4548{
4549 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4550 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4551 u32 nvaccess = tr32(NVRAM_ACCESS);
4552
4553 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4554 }
4555}
4556
4557/* tp->lock is held. */
4558static void tg3_disable_nvram_access(struct tg3 *tp)
4559{
4560 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4561 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4562 u32 nvaccess = tr32(NVRAM_ACCESS);
4563
4564 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4565 }
4566}
4567
4568/* tp->lock is held. */
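/* Tell the bootcode/ASF firmware which kind of reset is about to happen:
 * post the firmware mailbox magic and, with the new ASF handshake, the
 * matching DRV_STATE_* value.
 */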
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4570{
David S. Millerf49639e2006-06-09 11:58:36 -07004571 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4572 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573
4574 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4575 switch (kind) {
4576 case RESET_KIND_INIT:
4577 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4578 DRV_STATE_START);
4579 break;
4580
4581 case RESET_KIND_SHUTDOWN:
4582 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4583 DRV_STATE_UNLOAD);
4584 break;
4585
4586 case RESET_KIND_SUSPEND:
4587 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4588 DRV_STATE_SUSPEND);
4589 break;
4590
4591 default:
4592 break;
4593 };
4594 }
4595}
4596
4597/* tp->lock is held. */
4598static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4599{
4600 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4601 switch (kind) {
4602 case RESET_KIND_INIT:
4603 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4604 DRV_STATE_START_DONE);
4605 break;
4606
4607 case RESET_KIND_SHUTDOWN:
4608 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4609 DRV_STATE_UNLOAD_DONE);
4610 break;
4611
4612 default:
4613 break;
4614 };
4615 }
4616}
4617
4618/* tp->lock is held. */
4619static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4620{
4621 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4622 switch (kind) {
4623 case RESET_KIND_INIT:
4624 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4625 DRV_STATE_START);
4626 break;
4627
4628 case RESET_KIND_SHUTDOWN:
4629 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4630 DRV_STATE_UNLOAD);
4631 break;
4632
4633 case RESET_KIND_SUSPEND:
4634 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4635 DRV_STATE_SUSPEND);
4636 break;
4637
4638 default:
4639 break;
4640 };
4641 }
4642}
4643
4644static void tg3_stop_fw(struct tg3 *);
4645
4646/* tp->lock is held. */
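/* Issue a GRC core-clock reset and bring the chip back to a usable state:
 * restore PCI config space, re-enable MSI and the memory arbiter where
 * needed, and wait for the bootcode firmware handshake mailbox.
 */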
4647static int tg3_chip_reset(struct tg3 *tp)
4648{
4649 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07004650 void (*write_op)(struct tg3 *, u32, u32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 int i;
4652
David S. Millerf49639e2006-06-09 11:58:36 -07004653 tg3_nvram_lock(tp);
4654
4655 /* No matching tg3_nvram_unlock() after this because
4656 * chip reset below will undo the nvram lock.
4657 */
4658 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659
Michael Chand9ab5ad2006-03-20 22:27:35 -08004660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08004661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -08004662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4663 tw32(GRC_FASTBOOT_PC, 0);
4664
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665 /*
4666 * We must avoid the readl() that normally takes place.
4667 * It locks machines, causes machine checks, and other
4668 * fun things. So, temporarily disable the 5701
4669 * hardware workaround, while we do the reset.
4670 */
Michael Chan1ee582d2005-08-09 20:16:46 -07004671 write_op = tp->write32;
4672 if (write_op == tg3_write_flush_reg32)
4673 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674
4675 /* do the reset */
4676 val = GRC_MISC_CFG_CORECLK_RESET;
4677
4678 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4679 if (tr32(0x7e2c) == 0x60) {
4680 tw32(0x7e2c, 0x20);
4681 }
4682 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4683 tw32(GRC_MISC_CFG, (1 << 29));
4684 val |= (1 << 29);
4685 }
4686 }
4687
4688 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4689 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4690 tw32(GRC_MISC_CFG, val);
4691
Michael Chan1ee582d2005-08-09 20:16:46 -07004692 /* restore 5701 hardware bug workaround write method */
4693 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694
4695 /* Unfortunately, we have to delay before the PCI read back.
4696 * Some 575X chips will not even respond to a PCI cfg access
4697 * when the reset command is given to the chip.
4698 *
4699 * How do these hardware designers expect things to work
4700 * properly if the PCI write is posted for a long period
4701 * of time? It is always necessary to have some method by
4702 * which a register read back can occur to push the write
4703 * out which does the reset.
4704 *
4705 * For most tg3 variants the trick below was working.
4706 * Ho hum...
4707 */
4708 udelay(120);
4709
4710 /* Flush PCI posted writes. The normal MMIO registers
4711 * are inaccessible at this time so this is the only
4712 * way to do this reliably (actually, this is no longer
4713 * the case, see above). I tried to use indirect
4714 * register read/write but this upset some 5701 variants.
4715 */
4716 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4717
4718 udelay(120);
4719
4720 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4721 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4722 int i;
4723 u32 cfg_val;
4724
4725 /* Wait for link training to complete. */
4726 for (i = 0; i < 5000; i++)
4727 udelay(100);
4728
4729 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4730 pci_write_config_dword(tp->pdev, 0xc4,
4731 cfg_val | (1 << 15));
4732 }
4733 /* Set PCIE max payload size and clear error status. */
4734 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4735 }
4736
4737 /* Re-enable indirect register accesses. */
4738 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4739 tp->misc_host_ctrl);
4740
4741 /* Set MAX PCI retry to zero. */
4742 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4743 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4744 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4745 val |= PCISTATE_RETRY_SAME_DMA;
4746 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4747
4748 pci_restore_state(tp->pdev);
4749
4750 /* Make sure PCI-X relaxed ordering bit is clear. */
4751 pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4752 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4753 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4754
Michael Chana4e2b342005-10-26 15:46:52 -07004755 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chan4cf78e42005-07-25 12:29:19 -07004756 u32 val;
4757
4758 /* Chip reset on 5780 will reset MSI enable bit,
4759 * so need to restore it.
4760 */
4761 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4762 u16 ctrl;
4763
4764 pci_read_config_word(tp->pdev,
4765 tp->msi_cap + PCI_MSI_FLAGS,
4766 &ctrl);
4767 pci_write_config_word(tp->pdev,
4768 tp->msi_cap + PCI_MSI_FLAGS,
4769 ctrl | PCI_MSI_FLAGS_ENABLE);
4770 val = tr32(MSGINT_MODE);
4771 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4772 }
4773
4774 val = tr32(MEMARB_MODE);
4775 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4776
4777 } else
4778 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004779
4780 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4781 tg3_stop_fw(tp);
4782 tw32(0x5000, 0x400);
4783 }
4784
4785 tw32(GRC_MODE, tp->grc_mode);
4786
4787 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4788 u32 val = tr32(0xc4);
4789
4790 tw32(0xc4, val | (1 << 15));
4791 }
4792
4793 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4795 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4796 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4797 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4798 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4799 }
4800
4801 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4802 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4803 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07004804 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4805 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4806 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807 } else
4808 tw32_f(MAC_MODE, 0);
4809 udelay(40);
4810
David S. Millerf49639e2006-06-09 11:58:36 -07004811 /* Wait for firmware initialization to complete. */
4812 for (i = 0; i < 100000; i++) {
4813 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4814 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4815 break;
4816 udelay(10);
4817 }
4818
4819 /* Chip might not be fitted with firmware. Some Sun onboard
4820 * parts are configured like that. So don't signal the timeout
4821 * of the above loop as an error, but do report the lack of
4822 * running firmware once.
4823 */
4824 if (i >= 100000 &&
4825 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4826 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4827
4828 printk(KERN_INFO PFX "%s: No firmware running.\n",
4829 tp->dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004830 }
4831
4832 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4833 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4834 u32 val = tr32(0x7c00);
4835
4836 tw32(0x7c00, val | (1 << 25));
4837 }
4838
4839 /* Reprobe ASF enable state. */
4840 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4841 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4842 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4843 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4844 u32 nic_cfg;
4845
4846 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4847 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4848 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07004849 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4851 }
4852 }
4853
4854 return 0;
4855}
4856
4857/* tp->lock is held. */
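/* Ask the ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW, ring the
 * RX CPU event register and wait briefly for the acknowledgement.
 */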
4858static void tg3_stop_fw(struct tg3 *tp)
4859{
4860 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4861 u32 val;
4862 int i;
4863
4864 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4865 val = tr32(GRC_RX_CPU_EVENT);
4866 val |= (1 << 14);
4867 tw32(GRC_RX_CPU_EVENT, val);
4868
4869 /* Wait for RX cpu to ACK the event. */
4870 for (i = 0; i < 100; i++) {
4871 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4872 break;
4873 udelay(1);
4874 }
4875 }
4876}
4877
4878/* tp->lock is held. */
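/* Full shutdown path: stop the firmware, signal the reset reason given
 * by "kind", abort all DMA/MAC blocks and then reset the chip.
 */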
Michael Chan944d9802005-05-29 14:57:48 -07004879static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880{
4881 int err;
4882
4883 tg3_stop_fw(tp);
4884
Michael Chan944d9802005-05-29 14:57:48 -07004885 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004887 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888 err = tg3_chip_reset(tp);
4889
Michael Chan944d9802005-05-29 14:57:48 -07004890 tg3_write_sig_legacy(tp, kind);
4891 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892
4893 if (err)
4894 return err;
4895
4896 return 0;
4897}
4898
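/* Driver-supplied firmware for the NIC's internal CPUs, kept as hex words
 * as permitted by the copyright notice at the top of the file.  The
 * TG3_FW_* constants below give the load addresses and section sizes in
 * NIC memory.
 */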
4899#define TG3_FW_RELEASE_MAJOR 0x0
4900#define TG3_FW_RELASE_MINOR 0x0
4901#define TG3_FW_RELEASE_FIX 0x0
4902#define TG3_FW_START_ADDR 0x08000000
4903#define TG3_FW_TEXT_ADDR 0x08000000
4904#define TG3_FW_TEXT_LEN 0x9c0
4905#define TG3_FW_RODATA_ADDR 0x080009c0
4906#define TG3_FW_RODATA_LEN 0x60
4907#define TG3_FW_DATA_ADDR 0x08000a40
4908#define TG3_FW_DATA_LEN 0x20
4909#define TG3_FW_SBSS_ADDR 0x08000a60
4910#define TG3_FW_SBSS_LEN 0xc
4911#define TG3_FW_BSS_ADDR 0x08000a70
4912#define TG3_FW_BSS_LEN 0x10
4913
4914static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4915 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4916 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4917 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4918 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4919 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4920 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4921 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4922 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4923 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4924 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4925 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4926 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4927 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4928 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4929 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4930 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4931 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4932 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4933 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4934 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4935 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4936 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4937 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4938 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4939 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4940 0, 0, 0, 0, 0, 0,
4941 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4942 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4943 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4944 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4945 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4946 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4947 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4948 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4949 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4950 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4951 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4952 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4953 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4954 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4955 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4956 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4957 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4958 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4959 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4960 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4961 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4962 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4963 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4964 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4965 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4966 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4967 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4968 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4969 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4970 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4971 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4972 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4973 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4974 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4975 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4976 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4977 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4978 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4979 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4980 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4981 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4982 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4983 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4984 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4985 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4986 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4987 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4988 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4989 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4990 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4991 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4992 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4993 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4994 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4995 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4996 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4997 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4998 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4999 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5000 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5001 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5002 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5003 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5004 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5005 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5006};
5007
5008static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5009 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5010 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5011 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5012 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5013 0x00000000
5014};
5015
5016#if 0 /* All zeros, don't eat up space with it. */
5017u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5018 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5019 0x00000000, 0x00000000, 0x00000000, 0x00000000
5020};
5021#endif
5022
5023#define RX_CPU_SCRATCH_BASE 0x30000
5024#define RX_CPU_SCRATCH_SIZE 0x04000
5025#define TX_CPU_SCRATCH_BASE 0x34000
5026#define TX_CPU_SCRATCH_SIZE 0x04000
5027
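/*
 * Each embedded CPU gets a 0x4000-byte (16 KiB) scratch window in NIC SRAM,
 * the RX CPU at 0x30000 and the TX CPU at 0x34000.  tg3_load_firmware_cpu()
 * below zeroes the scratch area it is given before copying a firmware image
 * into it.
 */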
5028/* tp->lock is held. */
5029static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5030{
5031 int i;
5032
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02005033 BUG_ON(offset == TX_CPU_BASE &&
5034 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035
5036 if (offset == RX_CPU_BASE) {
5037 for (i = 0; i < 10000; i++) {
5038 tw32(offset + CPU_STATE, 0xffffffff);
5039 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5040 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5041 break;
5042 }
5043
5044 tw32(offset + CPU_STATE, 0xffffffff);
5045 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5046 udelay(10);
5047 } else {
5048 for (i = 0; i < 10000; i++) {
5049 tw32(offset + CPU_STATE, 0xffffffff);
5050 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5051 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5052 break;
5053 }
5054 }
5055
5056 if (i >= 10000) {
5057 		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5058 		       "unable to halt the %s CPU\n",
5059 tp->dev->name,
5060 (offset == RX_CPU_BASE ? "RX" : "TX"));
5061 return -ENODEV;
5062 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005063
5064 /* Clear firmware's nvram arbitration. */
5065 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5066 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067 return 0;
5068}
5069
5070struct fw_info {
5071 unsigned int text_base;
5072 unsigned int text_len;
5073 u32 *text_data;
5074 unsigned int rodata_base;
5075 unsigned int rodata_len;
5076 u32 *rodata_data;
5077 unsigned int data_base;
5078 unsigned int data_len;
5079 u32 *data_data;
5080};
5081
5082/* tp->lock is held. */
5083static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5084 int cpu_scratch_size, struct fw_info *info)
5085{
Michael Chanec41c7d2006-01-17 02:40:55 -08005086 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087 void (*write_op)(struct tg3 *, u32, u32);
5088
5089 if (cpu_base == TX_CPU_BASE &&
5090 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5091 		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5092 		       "TX CPU firmware on %s, which is a 5705 or later chip.\n",
5093 tp->dev->name);
5094 return -EINVAL;
5095 }
5096
5097 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5098 write_op = tg3_write_mem;
5099 else
5100 write_op = tg3_write_indirect_reg32;
5101
Michael Chan1b628152005-05-29 14:59:49 -07005102	/* It is possible that the bootcode is still loading at this point.
5103	 * Get the NVRAM lock before halting the CPU.
5104 */
Michael Chanec41c7d2006-01-17 02:40:55 -08005105 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005106 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08005107 if (!lock_err)
5108 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109 if (err)
5110 goto out;
5111
5112 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5113 write_op(tp, cpu_scratch_base + i, 0);
5114 tw32(cpu_base + CPU_STATE, 0xffffffff);
5115 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5116 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5117 write_op(tp, (cpu_scratch_base +
5118 (info->text_base & 0xffff) +
5119 (i * sizeof(u32))),
5120 (info->text_data ?
5121 info->text_data[i] : 0));
5122 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5123 write_op(tp, (cpu_scratch_base +
5124 (info->rodata_base & 0xffff) +
5125 (i * sizeof(u32))),
5126 (info->rodata_data ?
5127 info->rodata_data[i] : 0));
5128 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5129 write_op(tp, (cpu_scratch_base +
5130 (info->data_base & 0xffff) +
5131 (i * sizeof(u32))),
5132 (info->data_data ?
5133 info->data_data[i] : 0));
5134
5135 err = 0;
5136
5137out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138 return err;
5139}
5140
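/*
 * A worked example of the addressing in tg3_load_firmware_cpu(), using the
 * TSO firmware layout defined further below purely for illustration: the low
 * 16 bits of each section's link address are used as an offset into the CPU
 * scratch window, so word i of .text is written to
 *
 *	cpu_scratch_base + (text_base & 0xffff) + i * sizeof(u32)
 *
 * With a scratch base of RX_CPU_SCRATCH_BASE (0x30000) and a text base of
 * 0x08000000, word 0 lands at SRAM offset 0x30000, word 1 at 0x30004, and so
 * on.  Sections whose *_data pointer is NULL are filled with zeroes, as
 * tg3_load_5701_a0_firmware_fix() below does for its .data section.
 */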
5141/* tp->lock is held. */
5142static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5143{
5144 struct fw_info info;
5145 int err, i;
5146
5147 info.text_base = TG3_FW_TEXT_ADDR;
5148 info.text_len = TG3_FW_TEXT_LEN;
5149 info.text_data = &tg3FwText[0];
5150 info.rodata_base = TG3_FW_RODATA_ADDR;
5151 info.rodata_len = TG3_FW_RODATA_LEN;
5152 info.rodata_data = &tg3FwRodata[0];
5153 info.data_base = TG3_FW_DATA_ADDR;
5154 info.data_len = TG3_FW_DATA_LEN;
5155 info.data_data = NULL;
5156
5157 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5158 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5159 &info);
5160 if (err)
5161 return err;
5162
5163 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5164 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5165 &info);
5166 if (err)
5167 return err;
5168
5169 	/* Now start up only the RX CPU. */
5170 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5171 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5172
5173 for (i = 0; i < 5; i++) {
5174 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5175 break;
5176 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5177 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5178 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5179 udelay(1000);
5180 }
5181 if (i >= 5) {
5182 		printk(KERN_ERR PFX "tg3_load_firmware failed for %s: "
5183 		       "RX CPU PC is %08x, should be %08x\n",
5184 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5185 TG3_FW_TEXT_ADDR);
5186 return -ENODEV;
5187 }
5188 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5189 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5190
5191 return 0;
5192}
5193
5194#if TG3_TSO_SUPPORT != 0
5195
5196#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5197 #define TG3_TSO_FW_RELEASE_MINOR	0x6
5198#define TG3_TSO_FW_RELEASE_FIX 0x0
5199#define TG3_TSO_FW_START_ADDR 0x08000000
5200#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5201#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5202#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5203#define TG3_TSO_FW_RODATA_LEN 0x60
5204#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5205#define TG3_TSO_FW_DATA_LEN 0x30
5206#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5207#define TG3_TSO_FW_SBSS_LEN 0x2c
5208#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5209#define TG3_TSO_FW_BSS_LEN 0x894
5210
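/*
 * Sanity check on the layout above: the sections sit back to back,
 * 0x08000000 + 0x1aa0 = 0x08001aa0 (rodata), rodata ends at 0x08001b00 and
 * data begins at 0x08001b20 (a small alignment gap), 0x08001b20 + 0x30 =
 * 0x08001b50 (sbss).  Only text, rodata and data are copied by
 * tg3_load_firmware_cpu(); sbss and bss need no copy because the whole
 * scratch area is zeroed first.
 */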
5211static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5212 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5213 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5214 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5215 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5216 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5217 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5218 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5219 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5220 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5221 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5222 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5223 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5224 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5225 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5226 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5227 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5228 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5229 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5230 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5231 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5232 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5233 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5234 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5235 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5236 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5237 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5238 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5239 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5240 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5241 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5242 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5243 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5244 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5245 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5246 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5247 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5248 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5249 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5250 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5251 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5252 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5253 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5254 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5255 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5256 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5257 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5258 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5259 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5260 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5261 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5262 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5263 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5264 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5265 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5266 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5267 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5268 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5269 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5270 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5271 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5272 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5273 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5274 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5275 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5276 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5277 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5278 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5279 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5280 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5281 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5282 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5283 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5284 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5285 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5286 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5287 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5288 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5289 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5290 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5291 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5292 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5293 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5294 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5295 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5296 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5297 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5298 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5299 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5300 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5301 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5302 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5303 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5304 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5305 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5306 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5307 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5308 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5309 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5310 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5311 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5312 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5313 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5314 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5315 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5316 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5317 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5318 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5319 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5320 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5321 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5322 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5323 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5324 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5325 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5326 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5327 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5328 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5329 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5330 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5331 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5332 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5333 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5334 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5335 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5336 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5337 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5338 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5339 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5340 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5341 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5342 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5343 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5344 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5345 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5346 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5347 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5348 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5349 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5350 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5351 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5352 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5353 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5354 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5355 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5356 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5357 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5358 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5359 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5360 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5361 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5362 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5363 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5364 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5365 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5366 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5367 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5368 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5369 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5370 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5371 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5372 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5373 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5374 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5375 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5376 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5377 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5378 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5379 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5380 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5381 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5382 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5383 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5384 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5385 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5386 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5387 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5388 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5389 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5390 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5391 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5392 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5393 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5394 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5395 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5396 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5397 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5398 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5399 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5400 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5401 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5402 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5403 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5404 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5405 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5406 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5407 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5408 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5409 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5410 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5411 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5412 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5413 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5414 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5415 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5416 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5417 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5418 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5419 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5420 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5421 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5422 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5423 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5424 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5425 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5426 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5427 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5428 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5429 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5430 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5431 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5432 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5433 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5434 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5435 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5436 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5437 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5438 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5439 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5440 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5441 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5442 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5443 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5444 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5445 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5446 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5447 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5448 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5449 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5450 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5451 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5452 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5453 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5454 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5455 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5456 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5457 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5458 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5459 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5460 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5461 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5462 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5463 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5464 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5465 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5466 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5467 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5468 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5469 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5470 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5471 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5472 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5473 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5474 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5475 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5476 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5477 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5478 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5479 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5480 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5481 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5482 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5483 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5484 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5485 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5486 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5487 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5488 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5489 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5490 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5491 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5492 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5493 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5494 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5495 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5496};
5497
5498static u32 tg3TsoFwRodata[] = {
5499 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5500 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5501 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5502 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5503 0x00000000,
5504};
5505
5506static u32 tg3TsoFwData[] = {
5507 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5508 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5509 0x00000000,
5510};
5511
5512/* 5705 needs a special version of the TSO firmware. */
5513#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5514#define TG3_TSO5_FW_RELEASE_MINOR	0x2
5515#define TG3_TSO5_FW_RELEASE_FIX 0x0
5516#define TG3_TSO5_FW_START_ADDR 0x00010000
5517#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5518#define TG3_TSO5_FW_TEXT_LEN 0xe90
5519#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5520#define TG3_TSO5_FW_RODATA_LEN 0x50
5521#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5522#define TG3_TSO5_FW_DATA_LEN 0x20
5523#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5524#define TG3_TSO5_FW_SBSS_LEN 0x28
5525#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5526#define TG3_TSO5_FW_BSS_LEN 0x88
5527
5528static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5529 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5530 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5531 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5532 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5533 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5534 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5535 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5536 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5537 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5538 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5539 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5540 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5541 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5542 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5543 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5544 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5545 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5546 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5547 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5548 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5549 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5550 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5551 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5552 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5553 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5554 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5555 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5556 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5557 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5558 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5559 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5560 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5561 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5562 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5563 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5564 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5565 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5566 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5567 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5568 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5569 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5570 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5571 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5572 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5573 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5574 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5575 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5576 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5577 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5578 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5579 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5580 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5581 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5582 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5583 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5584 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5585 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5586 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5587 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5588 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5589 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5590 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5591 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5592 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5593 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5594 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5595 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5596 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5597 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5598 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5599 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5600 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5601 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5602 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5603 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5604 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5605 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5606 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5607 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5608 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5609 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5610 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5611 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5612 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5613 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5614 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5615 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5616 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5617 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5618 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5619 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5620 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5621 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5622 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5623 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5624 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5625 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5626 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5627 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5628 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5629 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5630 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5631 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5632 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5633 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5634 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5635 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5636 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5637 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5638 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5639 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5640 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5641 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5642 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5643 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5644 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5645 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5646 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5647 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5648 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5649 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5650 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5651 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5652 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5653 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5654 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5655 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5656 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5657 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5658 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5659 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5660 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5661 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5662 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5663 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5664 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5665 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5666 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5667 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5668 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5669 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5670 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5671 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5672 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5673 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5674 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5675 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5676 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5677 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5678 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5679 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5680 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5681 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5682 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5683 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5684 0x00000000, 0x00000000, 0x00000000,
5685};
5686
5687static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5688 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5689 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5690 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5691 0x00000000, 0x00000000, 0x00000000,
5692};
5693
5694static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5695 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5696 0x00000000, 0x00000000, 0x00000000,
5697};
5698
5699/* tp->lock is held. */
5700static int tg3_load_tso_firmware(struct tg3 *tp)
5701{
5702 struct fw_info info;
5703 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5704 int err, i;
5705
5706 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
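	/* Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware download. */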
5707 return 0;
5708
5709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5710 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5711 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5712 info.text_data = &tg3Tso5FwText[0];
5713 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5714 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5715 info.rodata_data = &tg3Tso5FwRodata[0];
5716 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5717 info.data_len = TG3_TSO5_FW_DATA_LEN;
5718 info.data_data = &tg3Tso5FwData[0];
5719 cpu_base = RX_CPU_BASE;
5720 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5721 cpu_scratch_size = (info.text_len +
5722 info.rodata_len +
5723 info.data_len +
5724 TG3_TSO5_FW_SBSS_LEN +
5725 TG3_TSO5_FW_BSS_LEN);
5726 } else {
5727 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5728 info.text_len = TG3_TSO_FW_TEXT_LEN;
5729 info.text_data = &tg3TsoFwText[0];
5730 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5731 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5732 info.rodata_data = &tg3TsoFwRodata[0];
5733 info.data_base = TG3_TSO_FW_DATA_ADDR;
5734 info.data_len = TG3_TSO_FW_DATA_LEN;
5735 info.data_data = &tg3TsoFwData[0];
5736 cpu_base = TX_CPU_BASE;
5737 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5738 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5739 }
5740
5741 err = tg3_load_firmware_cpu(tp, cpu_base,
5742 cpu_scratch_base, cpu_scratch_size,
5743 &info);
5744 if (err)
5745 return err;
5746
5747 	/* Now start up the CPU. */
5748 tw32(cpu_base + CPU_STATE, 0xffffffff);
5749 tw32_f(cpu_base + CPU_PC, info.text_base);
5750
5751 for (i = 0; i < 5; i++) {
5752 if (tr32(cpu_base + CPU_PC) == info.text_base)
5753 break;
5754 tw32(cpu_base + CPU_STATE, 0xffffffff);
5755 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
5756 tw32_f(cpu_base + CPU_PC, info.text_base);
5757 udelay(1000);
5758 }
5759 if (i >= 5) {
5760 		printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s: "
5761 		       "CPU PC is %08x, should be %08x\n",
5762 tp->dev->name, tr32(cpu_base + CPU_PC),
5763 info.text_base);
5764 return -ENODEV;
5765 }
5766 tw32(cpu_base + CPU_STATE, 0xffffffff);
5767 tw32_f(cpu_base + CPU_MODE, 0x00000000);
5768 return 0;
5769}
5770
5771#endif /* TG3_TSO_SUPPORT != 0 */
5772
5773/* tp->lock is held. */
5774static void __tg3_set_mac_addr(struct tg3 *tp)
5775{
5776 u32 addr_high, addr_low;
5777 int i;
5778
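	/* Each MAC address slot is a register pair: ADDR_HIGH takes bytes 0-1
	 * and ADDR_LOW bytes 2-5.  For a made-up address 00:10:18:01:02:03
	 * this gives addr_high = 0x00000010 and addr_low = 0x18010203.
	 */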
5779 addr_high = ((tp->dev->dev_addr[0] << 8) |
5780 tp->dev->dev_addr[1]);
5781 addr_low = ((tp->dev->dev_addr[2] << 24) |
5782 (tp->dev->dev_addr[3] << 16) |
5783 (tp->dev->dev_addr[4] << 8) |
5784 (tp->dev->dev_addr[5] << 0));
5785 for (i = 0; i < 4; i++) {
5786 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5787 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5788 }
5789
5790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5792 for (i = 0; i < 12; i++) {
5793 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5794 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5795 }
5796 }
5797
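	/* Derive the transmit backoff seed from the byte sum of the MAC
	 * address (masked to TX_BACKOFF_SEED_MASK), so different stations
	 * are unlikely to start from identical backoff state.
	 */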
5798 addr_high = (tp->dev->dev_addr[0] +
5799 tp->dev->dev_addr[1] +
5800 tp->dev->dev_addr[2] +
5801 tp->dev->dev_addr[3] +
5802 tp->dev->dev_addr[4] +
5803 tp->dev->dev_addr[5]) &
5804 TX_BACKOFF_SEED_MASK;
5805 tw32(MAC_TX_BACKOFF_SEED, addr_high);
5806}
5807
5808static int tg3_set_mac_addr(struct net_device *dev, void *p)
5809{
5810 struct tg3 *tp = netdev_priv(dev);
5811 struct sockaddr *addr = p;
5812
Michael Chanf9804dd2005-09-27 12:13:10 -07005813 if (!is_valid_ether_addr(addr->sa_data))
5814 return -EINVAL;
5815
Linus Torvalds1da177e2005-04-16 15:20:36 -07005816 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5817
Michael Chane75f7c92006-03-20 21:33:26 -08005818 if (!netif_running(dev))
5819 return 0;
5820
Michael Chan58712ef2006-04-29 18:58:01 -07005821 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5822 /* Reset chip so that ASF can re-init any MAC addresses it
5823 * needs.
5824 */
5825 tg3_netif_stop(tp);
5826 tg3_full_lock(tp, 1);
5827
5828 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07005829 tg3_init_hw(tp, 0);
Michael Chan58712ef2006-04-29 18:58:01 -07005830
5831 tg3_netif_start(tp);
5832 tg3_full_unlock(tp);
5833 } else {
5834 spin_lock_bh(&tp->lock);
5835 __tg3_set_mac_addr(tp);
5836 spin_unlock_bh(&tp->lock);
5837 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005838
5839 return 0;
5840}
5841
5842/* tp->lock is held. */
5843static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5844 dma_addr_t mapping, u32 maxlen_flags,
5845 u32 nic_addr)
5846{
5847 tg3_write_mem(tp,
5848 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5849 ((u64) mapping >> 32));
5850 tg3_write_mem(tp,
5851 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5852 ((u64) mapping & 0xffffffff));
5853 tg3_write_mem(tp,
5854 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5855 maxlen_flags);
5856
5857 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5858 tg3_write_mem(tp,
5859 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5860 nic_addr);
5861}
5862
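/*
 * Illustration of the split writes above, using a made-up DMA address: a
 * mapping of 0x0000000123456700 is stored as HOST_ADDR high = 0x00000001 and
 * low = 0x23456700, since each BDINFO field in NIC shared memory is a 32-bit
 * word.
 */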
5863static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07005864static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07005865{
5866 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5867 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5868 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5869 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5870 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5871 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5872 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5873 }
5874 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5875 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5876 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5877 u32 val = ec->stats_block_coalesce_usecs;
5878
5879 if (!netif_carrier_ok(tp->dev))
5880 val = 0;
5881
5882 tw32(HOSTCC_STAT_COAL_TICKS, val);
5883 }
5884}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005885
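/*
 * A minimal sketch of how the register mapping above is driven; the values
 * are illustrative only, not the driver's actual defaults:
 *
 *	struct ethtool_coalesce ec = {
 *		.rx_coalesce_usecs		= 20,	// HOSTCC_RXCOL_TICKS
 *		.tx_coalesce_usecs		= 72,	// HOSTCC_TXCOL_TICKS
 *		.rx_max_coalesced_frames	= 5,	// HOSTCC_RXMAX_FRAMES
 *		.tx_max_coalesced_frames	= 53,	// HOSTCC_TXMAX_FRAMES
 *	};
 *	__tg3_set_coalesce(tp, &ec);
 *
 * The per-irq tick and stats-block registers are written only on chips
 * without TG3_FLG2_5705_PLUS, as the tests above show.
 */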
5886/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07005887static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005888{
5889 u32 val, rdmac_mode;
5890 int i, err, limit;
5891
5892 tg3_disable_ints(tp);
5893
5894 tg3_stop_fw(tp);
5895
5896 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5897
5898 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07005899 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005900 }
5901
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07005902 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
Michael Chand4d2c552006-03-20 17:47:20 -08005903 tg3_phy_reset(tp);
5904
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905 err = tg3_chip_reset(tp);
5906 if (err)
5907 return err;
5908
5909 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5910
5911 /* This works around an issue with Athlon chipsets on
5912 * B3 tigon3 silicon. This bit has no effect on any
5913 * other revision. But do not set this on PCI Express
5914 * chips.
5915 */
5916 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5917 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5918 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5919
5920 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5921 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5922 val = tr32(TG3PCI_PCISTATE);
5923 val |= PCISTATE_RETRY_SAME_DMA;
5924 tw32(TG3PCI_PCISTATE, val);
5925 }
5926
5927 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5928 /* Enable some hw fixes. */
5929 val = tr32(TG3PCI_MSI_DATA);
5930 val |= (1 << 26) | (1 << 28) | (1 << 29);
5931 tw32(TG3PCI_MSI_DATA, val);
5932 }
5933
5934 	/* Descriptor ring init may access the NIC SRAM area
5935 	 * to set up the TX descriptors, so we
5936 * can only do this after the hardware has been
5937 * successfully reset.
5938 */
5939 tg3_init_rings(tp);
5940
5941 /* This value is determined during the probe time DMA
5942 * engine test, tg3_test_dma.
5943 */
5944 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5945
5946 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5947 GRC_MODE_4X_NIC_SEND_RINGS |
5948 GRC_MODE_NO_TX_PHDR_CSUM |
5949 GRC_MODE_NO_RX_PHDR_CSUM);
5950 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07005951
5952 /* Pseudo-header checksum is done by hardware logic and not
5953 	 * the offload processors, so make the chip do the pseudo-
5954 * header checksums on receive. For transmit it is more
5955 * convenient to do the pseudo-header checksum in software
5956 * as Linux does that on transmit for us in all cases.
5957 */
5958 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005959
5960 tw32(GRC_MODE,
5961 tp->grc_mode |
5962 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5963
5964 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
5965 val = tr32(GRC_MISC_CFG);
5966 val &= ~0xff;
5967 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5968 tw32(GRC_MISC_CFG, val);
5969
5970 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07005971 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005972 /* Do nothing. */
5973 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5974 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5976 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5977 else
5978 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5979 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5980 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5981 }
5982#if TG3_TSO_SUPPORT != 0
5983 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5984 int fw_len;
5985
5986 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5987 TG3_TSO5_FW_RODATA_LEN +
5988 TG3_TSO5_FW_DATA_LEN +
5989 TG3_TSO5_FW_SBSS_LEN +
5990 TG3_TSO5_FW_BSS_LEN);
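		/* Round the firmware footprint up to a 128-byte boundary
		 * before carving it out of the front of the 5705 MBUF pool.
		 */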
5991 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5992 tw32(BUFMGR_MB_POOL_ADDR,
5993 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5994 tw32(BUFMGR_MB_POOL_SIZE,
5995 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5996 }
5997#endif
5998
Michael Chan0f893dc2005-07-25 12:30:38 -07005999 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006000 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6001 tp->bufmgr_config.mbuf_read_dma_low_water);
6002 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6003 tp->bufmgr_config.mbuf_mac_rx_low_water);
6004 tw32(BUFMGR_MB_HIGH_WATER,
6005 tp->bufmgr_config.mbuf_high_water);
6006 } else {
6007 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6008 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6009 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6010 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6011 tw32(BUFMGR_MB_HIGH_WATER,
6012 tp->bufmgr_config.mbuf_high_water_jumbo);
6013 }
6014 tw32(BUFMGR_DMA_LOW_WATER,
6015 tp->bufmgr_config.dma_low_water);
6016 tw32(BUFMGR_DMA_HIGH_WATER,
6017 tp->bufmgr_config.dma_high_water);
6018
6019 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6020 for (i = 0; i < 2000; i++) {
6021 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6022 break;
6023 udelay(10);
6024 }
6025 if (i >= 2000) {
6026 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6027 tp->dev->name);
6028 return -ENODEV;
6029 }
6030
6031 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07006032 val = tp->rx_pending / 8;
6033 if (val == 0)
6034 val = 1;
6035 else if (val > tp->rx_std_max_post)
6036 val = tp->rx_std_max_post;
6037
6038 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006039
6040 /* Initialize TG3_BDINFO's at:
6041 * RCVDBDI_STD_BD: standard eth size rx ring
6042 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6043 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6044 *
6045 * like so:
6046 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6047 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6048 * ring attribute flags
6049 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6050 *
6051 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6052 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6053 *
6054 * The size of each ring is fixed in the firmware, but the location is
6055 * configurable.
6056 */
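	/* For example, on 5705-class chips the standard ring's MAXLEN_FLAGS
	 * word written below is RX_STD_MAX_SIZE_5705 << 16 with no flag bits
	 * set, while an unused ring gets BDINFO_FLAGS_DISABLED instead.
	 */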
6057 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6058 ((u64) tp->rx_std_mapping >> 32));
6059 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6060 ((u64) tp->rx_std_mapping & 0xffffffff));
6061 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6062 NIC_SRAM_RX_BUFFER_DESC);
6063
6064 /* Don't even try to program the JUMBO/MINI buffer descriptor
6065 * configs on 5705.
6066 */
6067 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6068 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6069 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6070 } else {
6071 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6072 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6073
6074 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6075 BDINFO_FLAGS_DISABLED);
6076
6077 /* Setup replenish threshold. */
6078 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6079
Michael Chan0f893dc2005-07-25 12:30:38 -07006080 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006081 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6082 ((u64) tp->rx_jumbo_mapping >> 32));
6083 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6084 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6085 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6086 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6087 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6088 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6089 } else {
6090 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6091 BDINFO_FLAGS_DISABLED);
6092 }
6093
6094 }
6095
6096 /* There is only one send ring on 5705/5750, no need to explicitly
6097 * disable the others.
6098 */
6099 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6100 /* Clear out send RCB ring in SRAM. */
6101 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6102 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6103 BDINFO_FLAGS_DISABLED);
6104 }
6105
6106 tp->tx_prod = 0;
6107 tp->tx_cons = 0;
6108 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6109 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6110
6111 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6112 tp->tx_desc_mapping,
6113 (TG3_TX_RING_SIZE <<
6114 BDINFO_FLAGS_MAXLEN_SHIFT),
6115 NIC_SRAM_TX_BUFFER_DESC);
6116
6117 /* There is only one receive return ring on 5705/5750, no need
6118 * to explicitly disable the others.
6119 */
6120 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6121 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6122 i += TG3_BDINFO_SIZE) {
6123 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6124 BDINFO_FLAGS_DISABLED);
6125 }
6126 }
6127
6128 tp->rx_rcb_ptr = 0;
6129 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6130
6131 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6132 tp->rx_rcb_mapping,
6133 (TG3_RX_RCB_RING_SIZE(tp) <<
6134 BDINFO_FLAGS_MAXLEN_SHIFT),
6135 0);
6136
6137 tp->rx_std_ptr = tp->rx_pending;
6138 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6139 tp->rx_std_ptr);
6140
Michael Chan0f893dc2005-07-25 12:30:38 -07006141 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07006142 tp->rx_jumbo_pending : 0;
6143 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6144 tp->rx_jumbo_ptr);
6145
6146 /* Initialize MAC address and backoff seed. */
6147 __tg3_set_mac_addr(tp);
6148
6149 /* MTU + ethernet header + FCS + optional VLAN tag */
6150 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6151
6152 /* The slot time is changed by tg3_setup_phy if we
6153 * run at gigabit with half duplex.
6154 */
6155 tw32(MAC_TX_LENGTHS,
6156 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6157 (6 << TX_LENGTHS_IPG_SHIFT) |
6158 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6159
6160 /* Receive rules. */
6161 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6162 tw32(RCVLPC_CONFIG, 0x0181);
6163
6164 /* Calculate RDMAC_MODE setting early, we need it to determine
6165 * the RCVLPC_STATE_ENABLE mask.
6166 */
6167 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6168 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6169 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6170 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6171 RDMAC_MODE_LNGREAD_ENAB);
6172 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6173 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
Michael Chan85e94ce2005-04-21 17:05:28 -07006174
6175 /* If statement applies to 5705 and 5750 PCI devices only */
6176 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6177 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6178 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006179 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6180 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6181 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6182 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6183 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6184 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6185 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6186 }
6187 }
6188
Michael Chan85e94ce2005-04-21 17:05:28 -07006189 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6190 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6191
Linus Torvalds1da177e2005-04-16 15:20:36 -07006192#if TG3_TSO_SUPPORT != 0
6193 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6194 rdmac_mode |= (1 << 27);
6195#endif
6196
6197 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07006198 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6199 val = tr32(RCVLPC_STATS_ENABLE);
6200 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6201 tw32(RCVLPC_STATS_ENABLE, val);
6202 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6203 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204 val = tr32(RCVLPC_STATS_ENABLE);
6205 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6206 tw32(RCVLPC_STATS_ENABLE, val);
6207 } else {
6208 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6209 }
6210 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6211 tw32(SNDDATAI_STATSENAB, 0xffffff);
6212 tw32(SNDDATAI_STATSCTRL,
6213 (SNDDATAI_SCTRL_ENABLE |
6214 SNDDATAI_SCTRL_FASTUPD));
6215
6216 /* Setup host coalescing engine. */
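	/* Writing 0 stops the coalescing engine; poll up to 20 ms
	 * (2000 x 10 usec) for the enable bit to clear before the
	 * coalescing parameters are reprogrammed below.
	 */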
6217 tw32(HOSTCC_MODE, 0);
6218 for (i = 0; i < 2000; i++) {
6219 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6220 break;
6221 udelay(10);
6222 }
6223
Michael Chand244c892005-07-05 14:42:33 -07006224 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006225
6226 /* set status block DMA address */
6227 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6228 ((u64) tp->status_mapping >> 32));
6229 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6230 ((u64) tp->status_mapping & 0xffffffff));
6231
6232 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6233 /* Status/statistics block address. See tg3_timer,
6234 * the tg3_periodic_fetch_stats call there, and
6235 * tg3_get_stats to see how this works for 5705/5750 chips.
6236 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006237 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6238 ((u64) tp->stats_mapping >> 32));
6239 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6240 ((u64) tp->stats_mapping & 0xffffffff));
6241 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6242 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6243 }
6244
6245 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6246
6247 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6248 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6249 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6250 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6251
6252 /* Clear statistics/status block in chip, and status block in ram. */
6253 for (i = NIC_SRAM_STATS_BLK;
6254 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6255 i += sizeof(u32)) {
6256 tg3_write_mem(tp, i, 0);
6257 udelay(40);
6258 }
6259 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6260
Michael Chanc94e3942005-09-27 12:12:42 -07006261 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6262 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6263 /* reset to prevent losing 1st rx packet intermittently */
6264 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6265 udelay(10);
6266 }
6267
Linus Torvalds1da177e2005-04-16 15:20:36 -07006268 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6269 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6270 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6271 udelay(40);
6272
Michael Chan314fba32005-04-21 17:07:04 -07006273 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6274 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6275 * register to preserve the GPIO settings for LOMs. The GPIOs,
6276 * whether used as inputs or outputs, are set by boot code after
6277 * reset.
6278 */
6279 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6280 u32 gpio_mask;
6281
6282 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6283 GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07006284
6285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6286 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6287 GRC_LCLCTRL_GPIO_OUTPUT3;
6288
Michael Chanaf36e6b2006-03-23 01:28:06 -08006289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6290 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6291
Michael Chan314fba32005-04-21 17:07:04 -07006292 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6293
6294 /* GPIO1 must be driven high for eeprom write protect */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006295 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6296 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07006297 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006298 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6299 udelay(100);
6300
Michael Chan09ee9292005-08-09 20:17:00 -07006301 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07006302 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006303
6304 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6305 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6306 udelay(40);
6307 }
6308
6309 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6310 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6311 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6312 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6313 WDMAC_MODE_LNGREAD_ENAB);
6314
Michael Chan85e94ce2005-04-21 17:05:28 -07006315 /* If statement applies to 5705 and 5750 PCI devices only */
6316 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6317 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6318 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006319		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6320 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6321 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6322 /* nothing */
6323 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6324 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6325 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6326 val |= WDMAC_MODE_RX_ACCEL;
6327 }
6328 }
6329
Michael Chand9ab5ad2006-03-20 22:27:35 -08006330 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08006331 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
Michael Chand9ab5ad2006-03-20 22:27:35 -08006333 val |= (1 << 29);
6334
Linus Torvalds1da177e2005-04-16 15:20:36 -07006335 tw32_f(WDMAC_MODE, val);
6336 udelay(40);
6337
6338 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6339 val = tr32(TG3PCI_X_CAPS);
6340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6341 val &= ~PCIX_CAPS_BURST_MASK;
6342 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6343 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6344 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6345 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6346 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6347 val |= (tp->split_mode_max_reqs <<
6348 PCIX_CAPS_SPLIT_SHIFT);
6349 }
6350 tw32(TG3PCI_X_CAPS, val);
6351 }
6352
6353 tw32_f(RDMAC_MODE, rdmac_mode);
6354 udelay(40);
6355
6356 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6357 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6358 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6359 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6360 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6361 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6362 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6363 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6364#if TG3_TSO_SUPPORT != 0
6365 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6366 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6367#endif
6368 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6369 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6370
6371 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6372 err = tg3_load_5701_a0_firmware_fix(tp);
6373 if (err)
6374 return err;
6375 }
6376
6377#if TG3_TSO_SUPPORT != 0
6378 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6379 err = tg3_load_tso_firmware(tp);
6380 if (err)
6381 return err;
6382 }
6383#endif
6384
6385 tp->tx_mode = TX_MODE_ENABLE;
6386 tw32_f(MAC_TX_MODE, tp->tx_mode);
6387 udelay(100);
6388
6389 tp->rx_mode = RX_MODE_ENABLE;
Michael Chanaf36e6b2006-03-23 01:28:06 -08006390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6391 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6392
Linus Torvalds1da177e2005-04-16 15:20:36 -07006393 tw32_f(MAC_RX_MODE, tp->rx_mode);
6394 udelay(10);
6395
6396 if (tp->link_config.phy_is_low_power) {
6397 tp->link_config.phy_is_low_power = 0;
6398 tp->link_config.speed = tp->link_config.orig_speed;
6399 tp->link_config.duplex = tp->link_config.orig_duplex;
6400 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6401 }
6402
6403 tp->mi_mode = MAC_MI_MODE_BASE;
6404 tw32_f(MAC_MI_MODE, tp->mi_mode);
6405 udelay(80);
6406
6407 tw32(MAC_LED_CTRL, tp->led_ctrl);
6408
6409 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07006410 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006411 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6412 udelay(10);
6413 }
6414 tw32_f(MAC_RX_MODE, tp->rx_mode);
6415 udelay(10);
6416
6417 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6419 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6420 /* Set drive transmission level to 1.2V */
6421 /* only if the signal pre-emphasis bit is not set */
6422 val = tr32(MAC_SERDES_CFG);
6423 val &= 0xfffff000;
6424 val |= 0x880;
6425 tw32(MAC_SERDES_CFG, val);
6426 }
6427 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6428 tw32(MAC_SERDES_CFG, 0x616000);
6429 }
6430
6431 /* Prevent chip from dropping frames when flow control
6432 * is enabled.
6433 */
6434 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6435
6436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6437 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6438 /* Use hardware link auto-negotiation */
6439 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6440 }
6441
Michael Chand4d2c552006-03-20 17:47:20 -08006442 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6443 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6444 u32 tmp;
6445
6446 tmp = tr32(SERDES_RX_CTRL);
6447 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6448 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6449 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6450 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6451 }
6452
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07006453 err = tg3_setup_phy(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006454 if (err)
6455 return err;
6456
6457 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6458 u32 tmp;
6459
6460 /* Clear CRC stats. */
6461 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6462 tg3_writephy(tp, 0x1e, tmp | 0x8000);
6463 tg3_readphy(tp, 0x14, &tmp);
6464 }
6465 }
6466
6467 __tg3_set_rx_mode(tp->dev);
6468
6469 /* Initialize receive rules. */
6470 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
6471 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6472 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
6473 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6474
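	/* The MAC provides 16 receive rule/value register pairs (only 8
	 * are usable on 5705-class chips outside the 5780 family), and
	 * the top four are left alone when ASF firmware is active.
	 * Rules 0 and 1 were programmed above; fall through the switch
	 * to clear every unused pair below the limit (the clears for
	 * rules 2 and 3 are intentionally commented out).
	 */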
Michael Chan4cf78e42005-07-25 12:29:19 -07006475 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07006476 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006477 limit = 8;
6478 else
6479 limit = 16;
6480 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6481 limit -= 4;
6482 switch (limit) {
6483 case 16:
6484 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
6485 case 15:
6486 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
6487 case 14:
6488 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
6489 case 13:
6490 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
6491 case 12:
6492 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
6493 case 11:
6494 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
6495 case 10:
6496 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
6497 case 9:
6498 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
6499 case 8:
6500 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
6501 case 7:
6502 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
6503 case 6:
6504 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
6505 case 5:
6506 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
6507 case 4:
6508 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6509 case 3:
6510 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6511 case 2:
6512 case 1:
6513
6514 default:
6515 break;
 6516	}
6517
6518 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6519
Linus Torvalds1da177e2005-04-16 15:20:36 -07006520 return 0;
6521}
6522
6523/* Called at device open time to get the chip ready for
6524 * packet processing. Invoked with tp->lock held.
6525 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07006526static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006527{
6528 int err;
6529
6530 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -08006531 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006532 if (err)
6533 goto out;
6534
6535 tg3_switch_clocks(tp);
6536
6537 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6538
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07006539 err = tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006540
6541out:
6542 return err;
6543}
6544
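/* Fold a 32-bit hardware counter into a 64-bit software counter.  A
 * carry out of the 32-bit addition is detected when the low word wraps
 * (i.e. ends up smaller than the value that was just added).
 */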
6545#define TG3_STAT_ADD32(PSTAT, REG) \
6546do { u32 __val = tr32(REG); \
6547 (PSTAT)->low += __val; \
6548 if ((PSTAT)->low < __val) \
6549 (PSTAT)->high += 1; \
6550} while (0)
6551
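/* Runs once a second from tg3_timer on 5705-and-newer chips, which do
 * not DMA a statistics block to host memory (see tg3_reset_hw); the
 * MAC and receive-list-placement counters are accumulated into the
 * 64-bit software statistics here instead.
 */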
6552static void tg3_periodic_fetch_stats(struct tg3 *tp)
6553{
6554 struct tg3_hw_stats *sp = tp->hw_stats;
6555
6556 if (!netif_carrier_ok(tp->dev))
6557 return;
6558
6559 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6560 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6561 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6562 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6563 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6564 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6565 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6566 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6567 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6568 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6569 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6570 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6571 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6572
6573 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6574 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6575 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6576 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6577 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6578 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6579 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6580 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6581 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6582 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6583 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6584 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6585 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6586 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07006587
6588 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6589 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6590 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006591}
6592
6593static void tg3_timer(unsigned long __opaque)
6594{
6595 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006596
Michael Chanf475f162006-03-27 23:20:14 -08006597 if (tp->irq_sync)
6598 goto restart_timer;
6599
David S. Millerf47c11e2005-06-24 20:18:35 -07006600 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601
David S. Millerfac9b832005-05-18 22:46:34 -07006602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 6603		/* All of this garbage is needed because, with non-tagged
 6604		 * IRQ status, the mailbox/status_block protocol the chip
 6605		 * uses with the CPU is race prone.
6606 */
6607 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6608 tw32(GRC_LOCAL_CTRL,
6609 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6610 } else {
6611 tw32(HOSTCC_MODE, tp->coalesce_mode |
6612 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6613 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006614
David S. Millerfac9b832005-05-18 22:46:34 -07006615 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6616 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07006617 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07006618 schedule_work(&tp->reset_task);
6619 return;
6620 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621 }
6622
Linus Torvalds1da177e2005-04-16 15:20:36 -07006623 /* This part only runs once per second. */
6624 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07006625 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6626 tg3_periodic_fetch_stats(tp);
6627
Linus Torvalds1da177e2005-04-16 15:20:36 -07006628 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6629 u32 mac_stat;
6630 int phy_event;
6631
6632 mac_stat = tr32(MAC_STATUS);
6633
6634 phy_event = 0;
6635 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6636 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6637 phy_event = 1;
6638 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6639 phy_event = 1;
6640
6641 if (phy_event)
6642 tg3_setup_phy(tp, 0);
6643 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6644 u32 mac_stat = tr32(MAC_STATUS);
6645 int need_setup = 0;
6646
6647 if (netif_carrier_ok(tp->dev) &&
6648 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6649 need_setup = 1;
6650 }
6651 if (! netif_carrier_ok(tp->dev) &&
6652 (mac_stat & (MAC_STATUS_PCS_SYNCED |
6653 MAC_STATUS_SIGNAL_DET))) {
6654 need_setup = 1;
6655 }
6656 if (need_setup) {
6657 tw32_f(MAC_MODE,
6658 (tp->mac_mode &
6659 ~MAC_MODE_PORT_MODE_MASK));
6660 udelay(40);
6661 tw32_f(MAC_MODE, tp->mac_mode);
6662 udelay(40);
6663 tg3_setup_phy(tp, 0);
6664 }
Michael Chan747e8f82005-07-25 12:33:22 -07006665 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6666 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667
6668 tp->timer_counter = tp->timer_multiplier;
6669 }
6670
Michael Chan28fbef72005-10-26 15:48:35 -07006671 /* Heartbeat is only sent once every 2 seconds. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672 if (!--tp->asf_counter) {
6673 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6674 u32 val;
6675
Michael Chanbbadf502006-04-06 21:46:34 -07006676 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6677 FWCMD_NICDRV_ALIVE2);
6678 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07006679 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07006680 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006681 val = tr32(GRC_RX_CPU_EVENT);
6682 val |= (1 << 14);
6683 tw32(GRC_RX_CPU_EVENT, val);
6684 }
6685 tp->asf_counter = tp->asf_multiplier;
6686 }
6687
David S. Millerf47c11e2005-06-24 20:18:35 -07006688 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006689
Michael Chanf475f162006-03-27 23:20:14 -08006690restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006691 tp->timer.expires = jiffies + tp->timer_offset;
6692 add_timer(&tp->timer);
6693}
6694
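/* Pick the interrupt handler that matches the current mode: one of the
 * MSI handlers (one-shot or normal) when MSI is enabled, otherwise the
 * tagged- or untagged-status INTx handler on a shared line.
 */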
Adrian Bunk81789ef2006-03-20 23:00:14 -08006695static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08006696{
6697 irqreturn_t (*fn)(int, void *, struct pt_regs *);
6698 unsigned long flags;
6699 struct net_device *dev = tp->dev;
6700
6701 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6702 fn = tg3_msi;
6703 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6704 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07006705 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08006706 } else {
6707 fn = tg3_interrupt;
6708 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6709 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07006710 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08006711 }
6712 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6713}
6714
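/* Verify that the device can actually raise an interrupt: temporarily
 * install tg3_test_isr, force a "coalesce now" event and poll the
 * interrupt mailbox for up to ~50 ms, then restore the normal handler.
 */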
Michael Chan79381092005-04-21 17:13:59 -07006715static int tg3_test_interrupt(struct tg3 *tp)
6716{
6717 struct net_device *dev = tp->dev;
6718 int err, i;
6719 u32 int_mbox = 0;
6720
Michael Chand4bc3922005-05-29 14:59:20 -07006721 if (!netif_running(dev))
6722 return -ENODEV;
6723
Michael Chan79381092005-04-21 17:13:59 -07006724 tg3_disable_ints(tp);
6725
6726 free_irq(tp->pdev->irq, dev);
6727
6728 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07006729 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07006730 if (err)
6731 return err;
6732
Michael Chan38f38432005-09-05 17:53:32 -07006733 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07006734 tg3_enable_ints(tp);
6735
6736 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6737 HOSTCC_MODE_NOW);
6738
6739 for (i = 0; i < 5; i++) {
Michael Chan09ee9292005-08-09 20:17:00 -07006740 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6741 TG3_64BIT_REG_LOW);
Michael Chan79381092005-04-21 17:13:59 -07006742 if (int_mbox != 0)
6743 break;
6744 msleep(10);
6745 }
6746
6747 tg3_disable_ints(tp);
6748
6749 free_irq(tp->pdev->irq, dev);
6750
Michael Chanfcfa0a32006-03-20 22:28:41 -08006751 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07006752
6753 if (err)
6754 return err;
6755
6756 if (int_mbox != 0)
6757 return 0;
6758
6759 return -EIO;
6760}
6761
6762/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6763 * successfully restored
6764 */
6765static int tg3_test_msi(struct tg3 *tp)
6766{
6767 struct net_device *dev = tp->dev;
6768 int err;
6769 u16 pci_cmd;
6770
6771 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6772 return 0;
6773
6774 /* Turn off SERR reporting in case MSI terminates with Master
6775 * Abort.
6776 */
6777 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6778 pci_write_config_word(tp->pdev, PCI_COMMAND,
6779 pci_cmd & ~PCI_COMMAND_SERR);
6780
6781 err = tg3_test_interrupt(tp);
6782
6783 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6784
6785 if (!err)
6786 return 0;
6787
6788 /* other failures */
6789 if (err != -EIO)
6790 return err;
6791
6792 /* MSI test failed, go back to INTx mode */
6793 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6794 "switching to INTx mode. Please report this failure to "
6795 "the PCI maintainer and include system chipset information.\n",
6796 tp->dev->name);
6797
6798 free_irq(tp->pdev->irq, dev);
6799 pci_disable_msi(tp->pdev);
6800
6801 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6802
Michael Chanfcfa0a32006-03-20 22:28:41 -08006803 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07006804 if (err)
6805 return err;
6806
6807 /* Need to reset the chip because the MSI cycle may have terminated
6808 * with Master Abort.
6809 */
David S. Millerf47c11e2005-06-24 20:18:35 -07006810 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07006811
Michael Chan944d9802005-05-29 14:57:48 -07006812 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07006813 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07006814
David S. Millerf47c11e2005-06-24 20:18:35 -07006815 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07006816
6817 if (err)
6818 free_irq(tp->pdev->irq, dev);
6819
6820 return err;
6821}
6822
Linus Torvalds1da177e2005-04-16 15:20:36 -07006823static int tg3_open(struct net_device *dev)
6824{
6825 struct tg3 *tp = netdev_priv(dev);
6826 int err;
6827
David S. Millerf47c11e2005-06-24 20:18:35 -07006828 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006829
Michael Chanbc1c7562006-03-20 17:48:03 -08006830 err = tg3_set_power_state(tp, PCI_D0);
 6831	if (err) {
		tg3_full_unlock(tp);
 6832		return err;
	}
6833
Linus Torvalds1da177e2005-04-16 15:20:36 -07006834 tg3_disable_ints(tp);
6835 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6836
David S. Millerf47c11e2005-06-24 20:18:35 -07006837 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006838
6839 /* The placement of this call is tied
6840 * to the setup and use of Host TX descriptors.
6841 */
6842 err = tg3_alloc_consistent(tp);
6843 if (err)
6844 return err;
6845
Michael Chan88b06bc2005-04-21 17:13:25 -07006846 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6847 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
Michael Chand4d2c552006-03-20 17:47:20 -08006848 (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6849 !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6850 (tp->pdev_peer == tp->pdev))) {
David S. Millerfac9b832005-05-18 22:46:34 -07006851 /* All MSI supporting chips should support tagged
6852 * status. Assert that this is the case.
6853 */
6854 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6855 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6856 "Not using MSI.\n", tp->dev->name);
6857 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006858 u32 msi_mode;
6859
6860 msi_mode = tr32(MSGINT_MODE);
6861 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6862 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6863 }
6864 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08006865 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866
6867 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006868 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6869 pci_disable_msi(tp->pdev);
6870 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6871 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006872 tg3_free_consistent(tp);
6873 return err;
6874 }
6875
David S. Millerf47c11e2005-06-24 20:18:35 -07006876 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006877
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07006878 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006879 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07006880 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006881 tg3_free_rings(tp);
6882 } else {
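		/* With tagged status the timer only needs to fire once a
		 * second; otherwise poll ten times a second to cope with
		 * the racy untagged mailbox protocol (see tg3_timer).
		 * timer_counter scales this back to once-per-second work,
		 * and the ASF heartbeat runs every two seconds.
		 */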
David S. Millerfac9b832005-05-18 22:46:34 -07006883 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6884 tp->timer_offset = HZ;
6885 else
6886 tp->timer_offset = HZ / 10;
6887
6888 BUG_ON(tp->timer_offset > HZ);
6889 tp->timer_counter = tp->timer_multiplier =
6890 (HZ / tp->timer_offset);
6891 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07006892 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006893
6894 init_timer(&tp->timer);
6895 tp->timer.expires = jiffies + tp->timer_offset;
6896 tp->timer.data = (unsigned long) tp;
6897 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006898 }
6899
David S. Millerf47c11e2005-06-24 20:18:35 -07006900 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006901
6902 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07006903 free_irq(tp->pdev->irq, dev);
6904 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6905 pci_disable_msi(tp->pdev);
6906 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6907 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006908 tg3_free_consistent(tp);
6909 return err;
6910 }
6911
Michael Chan79381092005-04-21 17:13:59 -07006912 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6913 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07006914
Michael Chan79381092005-04-21 17:13:59 -07006915 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07006916 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07006917
6918 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6919 pci_disable_msi(tp->pdev);
6920 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6921 }
Michael Chan944d9802005-05-29 14:57:48 -07006922 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07006923 tg3_free_rings(tp);
6924 tg3_free_consistent(tp);
6925
David S. Millerf47c11e2005-06-24 20:18:35 -07006926 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07006927
6928 return err;
6929 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08006930
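		/* For one-shot MSI, additionally set bit 29 in the unnamed
		 * register at offset 0x7c04 once the MSI test has passed.
		 */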
6931 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6932 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6933 u32 val = tr32(0x7c04);
6934
6935 tw32(0x7c04, val | (1 << 29));
6936 }
6937 }
Michael Chan79381092005-04-21 17:13:59 -07006938 }
6939
David S. Millerf47c11e2005-06-24 20:18:35 -07006940 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006941
Michael Chan79381092005-04-21 17:13:59 -07006942 add_timer(&tp->timer);
6943 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006944 tg3_enable_ints(tp);
6945
David S. Millerf47c11e2005-06-24 20:18:35 -07006946 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006947
6948 netif_start_queue(dev);
6949
6950 return 0;
6951}
6952
6953#if 0
6954/*static*/ void tg3_dump_state(struct tg3 *tp)
6955{
6956 u32 val32, val32_2, val32_3, val32_4, val32_5;
6957 u16 val16;
6958 int i;
6959
6960 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6961 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6962 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6963 val16, val32);
6964
6965 /* MAC block */
6966 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6967 tr32(MAC_MODE), tr32(MAC_STATUS));
6968 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6969 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6970 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6971 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6972 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6973 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6974
6975 /* Send data initiator control block */
6976 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6977 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6978 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6979 tr32(SNDDATAI_STATSCTRL));
6980
6981 /* Send data completion control block */
6982 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6983
6984 /* Send BD ring selector block */
6985 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6986 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6987
6988 /* Send BD initiator control block */
6989 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6990 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6991
6992 /* Send BD completion control block */
6993 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6994
6995 /* Receive list placement control block */
6996 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6997 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6998 printk(" RCVLPC_STATSCTRL[%08x]\n",
6999 tr32(RCVLPC_STATSCTRL));
7000
7001 /* Receive data and receive BD initiator control block */
7002 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7003 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7004
7005 /* Receive data completion control block */
7006 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7007 tr32(RCVDCC_MODE));
7008
7009 /* Receive BD initiator control block */
7010 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7011 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7012
7013 /* Receive BD completion control block */
7014 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7015 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7016
7017 /* Receive list selector control block */
7018 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7019 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7020
7021 /* Mbuf cluster free block */
7022 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7023 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7024
7025 /* Host coalescing control block */
7026 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7027 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7028 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7029 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7030 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7031 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7032 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7033 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7034 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7035 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7036 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7037 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7038
7039 /* Memory arbiter control block */
7040 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7041 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7042
7043 /* Buffer manager control block */
7044 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7045 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7046 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7047 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7048 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7049 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7050 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7051 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7052
7053 /* Read DMA control block */
7054 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7055 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7056
7057 /* Write DMA control block */
7058 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7059 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7060
7061 /* DMA completion block */
7062 printk("DEBUG: DMAC_MODE[%08x]\n",
7063 tr32(DMAC_MODE));
7064
7065 /* GRC block */
7066 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7067 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7068 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7069 tr32(GRC_LOCAL_CTRL));
7070
7071 /* TG3_BDINFOs */
7072 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7073 tr32(RCVDBDI_JUMBO_BD + 0x0),
7074 tr32(RCVDBDI_JUMBO_BD + 0x4),
7075 tr32(RCVDBDI_JUMBO_BD + 0x8),
7076 tr32(RCVDBDI_JUMBO_BD + 0xc));
7077 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7078 tr32(RCVDBDI_STD_BD + 0x0),
7079 tr32(RCVDBDI_STD_BD + 0x4),
7080 tr32(RCVDBDI_STD_BD + 0x8),
7081 tr32(RCVDBDI_STD_BD + 0xc));
7082 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7083 tr32(RCVDBDI_MINI_BD + 0x0),
7084 tr32(RCVDBDI_MINI_BD + 0x4),
7085 tr32(RCVDBDI_MINI_BD + 0x8),
7086 tr32(RCVDBDI_MINI_BD + 0xc));
7087
7088 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7089 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7090 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7091 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7092 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7093 val32, val32_2, val32_3, val32_4);
7094
7095 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7096 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7097 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7098 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7099 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7100 val32, val32_2, val32_3, val32_4);
7101
7102 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7103 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7104 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7105 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7106 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7107 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7108 val32, val32_2, val32_3, val32_4, val32_5);
7109
7110 /* SW status block */
7111 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7112 tp->hw_status->status,
7113 tp->hw_status->status_tag,
7114 tp->hw_status->rx_jumbo_consumer,
7115 tp->hw_status->rx_consumer,
7116 tp->hw_status->rx_mini_consumer,
7117 tp->hw_status->idx[0].rx_producer,
7118 tp->hw_status->idx[0].tx_consumer);
7119
7120 /* SW statistics block */
7121 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7122 ((u32 *)tp->hw_stats)[0],
7123 ((u32 *)tp->hw_stats)[1],
7124 ((u32 *)tp->hw_stats)[2],
7125 ((u32 *)tp->hw_stats)[3]);
7126
7127 /* Mailboxes */
7128 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07007129 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7130 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7131 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7132 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007133
7134 /* NIC side send descriptors. */
7135 for (i = 0; i < 6; i++) {
7136 unsigned long txd;
7137
7138 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7139 + (i * sizeof(struct tg3_tx_buffer_desc));
7140 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7141 i,
7142 readl(txd + 0x0), readl(txd + 0x4),
7143 readl(txd + 0x8), readl(txd + 0xc));
7144 }
7145
7146 /* NIC side RX descriptors. */
7147 for (i = 0; i < 6; i++) {
7148 unsigned long rxd;
7149
7150 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7151 + (i * sizeof(struct tg3_rx_buffer_desc));
7152 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7153 i,
7154 readl(rxd + 0x0), readl(rxd + 0x4),
7155 readl(rxd + 0x8), readl(rxd + 0xc));
7156 rxd += (4 * sizeof(u32));
7157 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7158 i,
7159 readl(rxd + 0x0), readl(rxd + 0x4),
7160 readl(rxd + 0x8), readl(rxd + 0xc));
7161 }
7162
7163 for (i = 0; i < 6; i++) {
7164 unsigned long rxd;
7165
7166 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7167 + (i * sizeof(struct tg3_rx_buffer_desc));
7168 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7169 i,
7170 readl(rxd + 0x0), readl(rxd + 0x4),
7171 readl(rxd + 0x8), readl(rxd + 0xc));
7172 rxd += (4 * sizeof(u32));
7173 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7174 i,
7175 readl(rxd + 0x0), readl(rxd + 0x4),
7176 readl(rxd + 0x8), readl(rxd + 0xc));
7177 }
7178}
7179#endif
7180
7181static struct net_device_stats *tg3_get_stats(struct net_device *);
7182static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7183
7184static int tg3_close(struct net_device *dev)
7185{
7186 struct tg3 *tp = netdev_priv(dev);
7187
Michael Chan7faa0062006-02-02 17:29:28 -08007188 /* Calling flush_scheduled_work() may deadlock because
7189 * linkwatch_event() may be on the workqueue and it will try to get
7190 * the rtnl_lock which we are holding.
7191 */
7192 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7193 msleep(1);
7194
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195 netif_stop_queue(dev);
7196
7197 del_timer_sync(&tp->timer);
7198
David S. Millerf47c11e2005-06-24 20:18:35 -07007199 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007200#if 0
7201 tg3_dump_state(tp);
7202#endif
7203
7204 tg3_disable_ints(tp);
7205
Michael Chan944d9802005-05-29 14:57:48 -07007206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007207 tg3_free_rings(tp);
7208 tp->tg3_flags &=
7209 ~(TG3_FLAG_INIT_COMPLETE |
7210 TG3_FLAG_GOT_SERDES_FLOWCTL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211
David S. Millerf47c11e2005-06-24 20:18:35 -07007212 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007213
Michael Chan88b06bc2005-04-21 17:13:25 -07007214 free_irq(tp->pdev->irq, dev);
7215 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7216 pci_disable_msi(tp->pdev);
7217 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7218 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007219
7220 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7221 sizeof(tp->net_stats_prev));
7222 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7223 sizeof(tp->estats_prev));
7224
7225 tg3_free_consistent(tp);
7226
Michael Chanbc1c7562006-03-20 17:48:03 -08007227 tg3_set_power_state(tp, PCI_D3hot);
7228
7229 netif_carrier_off(tp->dev);
7230
Linus Torvalds1da177e2005-04-16 15:20:36 -07007231 return 0;
7232}
7233
7234static inline unsigned long get_stat64(tg3_stat64_t *val)
7235{
7236 unsigned long ret;
7237
7238#if (BITS_PER_LONG == 32)
7239 ret = val->low;
7240#else
7241 ret = ((u64)val->high << 32) | ((u64)val->low);
7242#endif
7243 return ret;
7244}
7245
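/* On 5700/5701 boards with a copper PHY the frame-CRC error count
 * lives in a PHY register (read via the 0x1e/0x14 sequence, the same
 * one tg3_reset_hw uses to clear the CRC stats) and is accumulated in
 * software; everything else reports the rx_fcs_errors hardware stat.
 */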
7246static unsigned long calc_crc_errors(struct tg3 *tp)
7247{
7248 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7249
7250 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7251 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007253 u32 val;
7254
David S. Millerf47c11e2005-06-24 20:18:35 -07007255 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007256 if (!tg3_readphy(tp, 0x1e, &val)) {
7257 tg3_writephy(tp, 0x1e, val | 0x8000);
7258 tg3_readphy(tp, 0x14, &val);
7259 } else
7260 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07007261 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007262
7263 tp->phy_crc_errors += val;
7264
7265 return tp->phy_crc_errors;
7266 }
7267
7268 return get_stat64(&hw_stats->rx_fcs_errors);
7269}
7270
7271#define ESTAT_ADD(member) \
7272 estats->member = old_estats->member + \
7273 get_stat64(&hw_stats->member)
7274
7275static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7276{
7277 struct tg3_ethtool_stats *estats = &tp->estats;
7278 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7279 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7280
7281 if (!hw_stats)
7282 return old_estats;
7283
7284 ESTAT_ADD(rx_octets);
7285 ESTAT_ADD(rx_fragments);
7286 ESTAT_ADD(rx_ucast_packets);
7287 ESTAT_ADD(rx_mcast_packets);
7288 ESTAT_ADD(rx_bcast_packets);
7289 ESTAT_ADD(rx_fcs_errors);
7290 ESTAT_ADD(rx_align_errors);
7291 ESTAT_ADD(rx_xon_pause_rcvd);
7292 ESTAT_ADD(rx_xoff_pause_rcvd);
7293 ESTAT_ADD(rx_mac_ctrl_rcvd);
7294 ESTAT_ADD(rx_xoff_entered);
7295 ESTAT_ADD(rx_frame_too_long_errors);
7296 ESTAT_ADD(rx_jabbers);
7297 ESTAT_ADD(rx_undersize_packets);
7298 ESTAT_ADD(rx_in_length_errors);
7299 ESTAT_ADD(rx_out_length_errors);
7300 ESTAT_ADD(rx_64_or_less_octet_packets);
7301 ESTAT_ADD(rx_65_to_127_octet_packets);
7302 ESTAT_ADD(rx_128_to_255_octet_packets);
7303 ESTAT_ADD(rx_256_to_511_octet_packets);
7304 ESTAT_ADD(rx_512_to_1023_octet_packets);
7305 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7306 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7307 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7308 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7309 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7310
7311 ESTAT_ADD(tx_octets);
7312 ESTAT_ADD(tx_collisions);
7313 ESTAT_ADD(tx_xon_sent);
7314 ESTAT_ADD(tx_xoff_sent);
7315 ESTAT_ADD(tx_flow_control);
7316 ESTAT_ADD(tx_mac_errors);
7317 ESTAT_ADD(tx_single_collisions);
7318 ESTAT_ADD(tx_mult_collisions);
7319 ESTAT_ADD(tx_deferred);
7320 ESTAT_ADD(tx_excessive_collisions);
7321 ESTAT_ADD(tx_late_collisions);
7322 ESTAT_ADD(tx_collide_2times);
7323 ESTAT_ADD(tx_collide_3times);
7324 ESTAT_ADD(tx_collide_4times);
7325 ESTAT_ADD(tx_collide_5times);
7326 ESTAT_ADD(tx_collide_6times);
7327 ESTAT_ADD(tx_collide_7times);
7328 ESTAT_ADD(tx_collide_8times);
7329 ESTAT_ADD(tx_collide_9times);
7330 ESTAT_ADD(tx_collide_10times);
7331 ESTAT_ADD(tx_collide_11times);
7332 ESTAT_ADD(tx_collide_12times);
7333 ESTAT_ADD(tx_collide_13times);
7334 ESTAT_ADD(tx_collide_14times);
7335 ESTAT_ADD(tx_collide_15times);
7336 ESTAT_ADD(tx_ucast_packets);
7337 ESTAT_ADD(tx_mcast_packets);
7338 ESTAT_ADD(tx_bcast_packets);
7339 ESTAT_ADD(tx_carrier_sense_errors);
7340 ESTAT_ADD(tx_discards);
7341 ESTAT_ADD(tx_errors);
7342
7343 ESTAT_ADD(dma_writeq_full);
7344 ESTAT_ADD(dma_write_prioq_full);
7345 ESTAT_ADD(rxbds_empty);
7346 ESTAT_ADD(rx_discards);
7347 ESTAT_ADD(rx_errors);
7348 ESTAT_ADD(rx_threshold_hit);
7349
7350 ESTAT_ADD(dma_readq_full);
7351 ESTAT_ADD(dma_read_prioq_full);
7352 ESTAT_ADD(tx_comp_queue_full);
7353
7354 ESTAT_ADD(ring_set_send_prod_index);
7355 ESTAT_ADD(ring_status_update);
7356 ESTAT_ADD(nic_irqs);
7357 ESTAT_ADD(nic_avoided_irqs);
7358 ESTAT_ADD(nic_tx_threshold_hit);
7359
7360 return estats;
7361}
7362
7363static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7364{
7365 struct tg3 *tp = netdev_priv(dev);
7366 struct net_device_stats *stats = &tp->net_stats;
7367 struct net_device_stats *old_stats = &tp->net_stats_prev;
7368 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7369
7370 if (!hw_stats)
7371 return old_stats;
7372
7373 stats->rx_packets = old_stats->rx_packets +
7374 get_stat64(&hw_stats->rx_ucast_packets) +
7375 get_stat64(&hw_stats->rx_mcast_packets) +
7376 get_stat64(&hw_stats->rx_bcast_packets);
7377
7378 stats->tx_packets = old_stats->tx_packets +
7379 get_stat64(&hw_stats->tx_ucast_packets) +
7380 get_stat64(&hw_stats->tx_mcast_packets) +
7381 get_stat64(&hw_stats->tx_bcast_packets);
7382
7383 stats->rx_bytes = old_stats->rx_bytes +
7384 get_stat64(&hw_stats->rx_octets);
7385 stats->tx_bytes = old_stats->tx_bytes +
7386 get_stat64(&hw_stats->tx_octets);
7387
7388 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07007389 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007390 stats->tx_errors = old_stats->tx_errors +
7391 get_stat64(&hw_stats->tx_errors) +
7392 get_stat64(&hw_stats->tx_mac_errors) +
7393 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7394 get_stat64(&hw_stats->tx_discards);
7395
7396 stats->multicast = old_stats->multicast +
7397 get_stat64(&hw_stats->rx_mcast_packets);
7398 stats->collisions = old_stats->collisions +
7399 get_stat64(&hw_stats->tx_collisions);
7400
7401 stats->rx_length_errors = old_stats->rx_length_errors +
7402 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7403 get_stat64(&hw_stats->rx_undersize_packets);
7404
7405 stats->rx_over_errors = old_stats->rx_over_errors +
7406 get_stat64(&hw_stats->rxbds_empty);
7407 stats->rx_frame_errors = old_stats->rx_frame_errors +
7408 get_stat64(&hw_stats->rx_align_errors);
7409 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7410 get_stat64(&hw_stats->tx_discards);
7411 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7412 get_stat64(&hw_stats->tx_carrier_sense_errors);
7413
7414 stats->rx_crc_errors = old_stats->rx_crc_errors +
7415 calc_crc_errors(tp);
7416
John W. Linville4f63b872005-09-12 14:43:18 -07007417 stats->rx_missed_errors = old_stats->rx_missed_errors +
7418 get_stat64(&hw_stats->rx_discards);
7419
Linus Torvalds1da177e2005-04-16 15:20:36 -07007420 return stats;
7421}
7422
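/* Bit-reflected CRC-32 (polynomial 0xedb88320), i.e. the standard
 * Ethernet FCS algorithm; used below to hash multicast addresses into
 * the 128-bit MAC hash filter.
 */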
7423static inline u32 calc_crc(unsigned char *buf, int len)
7424{
7425 u32 reg;
7426 u32 tmp;
7427 int j, k;
7428
7429 reg = 0xffffffff;
7430
7431 for (j = 0; j < len; j++) {
7432 reg ^= buf[j];
7433
7434 for (k = 0; k < 8; k++) {
7435 tmp = reg & 0x01;
7436
7437 reg >>= 1;
7438
7439 if (tmp) {
7440 reg ^= 0xedb88320;
7441 }
7442 }
7443 }
7444
7445 return ~reg;
7446}
7447
7448static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7449{
7450 /* accept or reject all multicast frames */
7451 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7452 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7453 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7454 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7455}
7456
7457static void __tg3_set_rx_mode(struct net_device *dev)
7458{
7459 struct tg3 *tp = netdev_priv(dev);
7460 u32 rx_mode;
7461
7462 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7463 RX_MODE_KEEP_VLAN_TAG);
7464
7465 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7466 * flag clear.
7467 */
7468#if TG3_VLAN_TAG_USED
7469 if (!tp->vlgrp &&
7470 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7471 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7472#else
7473 /* By definition, VLAN is disabled always in this
7474 * case.
7475 */
7476 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7477 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7478#endif
7479
7480 if (dev->flags & IFF_PROMISC) {
7481 /* Promiscuous mode. */
7482 rx_mode |= RX_MODE_PROMISC;
7483 } else if (dev->flags & IFF_ALLMULTI) {
7484 /* Accept all multicast. */
7485 tg3_set_multi (tp, 1);
7486 } else if (dev->mc_count < 1) {
7487 /* Reject all multicast. */
7488 tg3_set_multi (tp, 0);
7489 } else {
7490 /* Accept one or more multicast(s). */
7491 struct dev_mc_list *mclist;
7492 unsigned int i;
7493 u32 mc_filter[4] = { 0, };
7494 u32 regidx;
7495 u32 bit;
7496 u32 crc;
7497
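		/* Hash each multicast address with CRC-32 and use the low
		 * seven bits of the inverted CRC to select one of the 128
		 * filter bits spread across the four hash registers.
		 */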
7498 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7499 i++, mclist = mclist->next) {
7500
7501 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
7502 bit = ~crc & 0x7f;
7503 regidx = (bit & 0x60) >> 5;
7504 bit &= 0x1f;
7505 mc_filter[regidx] |= (1 << bit);
7506 }
7507
7508 tw32(MAC_HASH_REG_0, mc_filter[0]);
7509 tw32(MAC_HASH_REG_1, mc_filter[1]);
7510 tw32(MAC_HASH_REG_2, mc_filter[2]);
7511 tw32(MAC_HASH_REG_3, mc_filter[3]);
7512 }
7513
7514 if (rx_mode != tp->rx_mode) {
7515 tp->rx_mode = rx_mode;
7516 tw32_f(MAC_RX_MODE, rx_mode);
7517 udelay(10);
7518 }
7519}
7520
7521static void tg3_set_rx_mode(struct net_device *dev)
7522{
7523 struct tg3 *tp = netdev_priv(dev);
7524
Michael Chane75f7c92006-03-20 21:33:26 -08007525 if (!netif_running(dev))
7526 return;
7527
David S. Millerf47c11e2005-06-24 20:18:35 -07007528 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007529 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07007530 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007531}
7532
7533#define TG3_REGDUMP_LEN (32 * 1024)
7534
7535static int tg3_get_regs_len(struct net_device *dev)
7536{
7537 return TG3_REGDUMP_LEN;
7538}
7539
7540static void tg3_get_regs(struct net_device *dev,
7541 struct ethtool_regs *regs, void *_p)
7542{
7543 u32 *p = _p;
7544 struct tg3 *tp = netdev_priv(dev);
7545 u8 *orig_p = _p;
7546 int i;
7547
7548 regs->version = 0;
7549
7550 memset(p, 0, TG3_REGDUMP_LEN);
7551
Michael Chanbc1c7562006-03-20 17:48:03 -08007552 if (tp->link_config.phy_is_low_power)
7553 return;
7554
David S. Millerf47c11e2005-06-24 20:18:35 -07007555 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007556
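/* Each register block is copied to its natural offset within the 32 KB
 * dump buffer (p is re-seated to orig_p + base before every block), so
 * the ethtool register dump mirrors the device register map.
 */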
7557#define __GET_REG32(reg) (*(p)++ = tr32(reg))
7558#define GET_REG32_LOOP(base,len) \
7559do { p = (u32 *)(orig_p + (base)); \
7560 for (i = 0; i < len; i += 4) \
7561 __GET_REG32((base) + i); \
7562} while (0)
7563#define GET_REG32_1(reg) \
7564do { p = (u32 *)(orig_p + (reg)); \
7565 __GET_REG32((reg)); \
7566} while (0)
7567
7568 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7569 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7570 GET_REG32_LOOP(MAC_MODE, 0x4f0);
7571 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7572 GET_REG32_1(SNDDATAC_MODE);
7573 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7574 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7575 GET_REG32_1(SNDBDC_MODE);
7576 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7577 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7578 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7579 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7580 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7581 GET_REG32_1(RCVDCC_MODE);
7582 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7583 GET_REG32_LOOP(RCVCC_MODE, 0x14);
7584 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7585 GET_REG32_1(MBFREE_MODE);
7586 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7587 GET_REG32_LOOP(MEMARB_MODE, 0x10);
7588 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7589 GET_REG32_LOOP(RDMAC_MODE, 0x08);
7590 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08007591 GET_REG32_1(RX_CPU_MODE);
7592 GET_REG32_1(RX_CPU_STATE);
7593 GET_REG32_1(RX_CPU_PGMCTR);
7594 GET_REG32_1(RX_CPU_HWBKPT);
7595 GET_REG32_1(TX_CPU_MODE);
7596 GET_REG32_1(TX_CPU_STATE);
7597 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7599 GET_REG32_LOOP(FTQ_RESET, 0x120);
7600 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7601 GET_REG32_1(DMAC_MODE);
7602 GET_REG32_LOOP(GRC_MODE, 0x4c);
7603 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7604 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7605
7606#undef __GET_REG32
7607#undef GET_REG32_LOOP
7608#undef GET_REG32_1
7609
David S. Millerf47c11e2005-06-24 20:18:35 -07007610 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007611}
7612
7613static int tg3_get_eeprom_len(struct net_device *dev)
7614{
7615 struct tg3 *tp = netdev_priv(dev);
7616
7617 return tp->nvram_size;
7618}
7619
7620static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Michael Chan18201802006-03-20 22:29:15 -08007621static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007622
7623static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7624{
7625 struct tg3 *tp = netdev_priv(dev);
7626 int ret;
7627 u8 *pd;
7628 u32 i, offset, len, val, b_offset, b_count;
7629
Michael Chanbc1c7562006-03-20 17:48:03 -08007630 if (tp->link_config.phy_is_low_power)
7631 return -EAGAIN;
7632
Linus Torvalds1da177e2005-04-16 15:20:36 -07007633 offset = eeprom->offset;
7634 len = eeprom->len;
7635 eeprom->len = 0;
7636
7637 eeprom->magic = TG3_EEPROM_MAGIC;
7638
7639 if (offset & 3) {
7640 /* adjustments to start on required 4 byte boundary */
7641 b_offset = offset & 3;
7642 b_count = 4 - b_offset;
7643 if (b_count > len) {
7644 /* i.e. offset=1 len=2 */
7645 b_count = len;
7646 }
7647 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7648 if (ret)
7649 return ret;
7650 val = cpu_to_le32(val);
7651 memcpy(data, ((char*)&val) + b_offset, b_count);
7652 len -= b_count;
7653 offset += b_count;
7654 eeprom->len += b_count;
7655 }
7656
7657	/* read bytes up to the last 4 byte boundary */
7658 pd = &data[eeprom->len];
7659 for (i = 0; i < (len - (len & 3)); i += 4) {
7660 ret = tg3_nvram_read(tp, offset + i, &val);
7661 if (ret) {
7662 eeprom->len += i;
7663 return ret;
7664 }
7665 val = cpu_to_le32(val);
7666 memcpy(pd + i, &val, 4);
7667 }
7668 eeprom->len += i;
7669
7670 if (len & 3) {
7671 /* read last bytes not ending on 4 byte boundary */
7672 pd = &data[eeprom->len];
7673 b_count = len & 3;
7674 b_offset = offset + len - b_count;
7675 ret = tg3_nvram_read(tp, b_offset, &val);
7676 if (ret)
7677 return ret;
7678 val = cpu_to_le32(val);
7679 memcpy(pd, ((char*)&val), b_count);
7680 eeprom->len += b_count;
7681 }
7682 return 0;
7683}
7684
7685static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7686
7687static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7688{
7689 struct tg3 *tp = netdev_priv(dev);
7690 int ret;
7691 u32 offset, len, b_offset, odd_len, start, end;
7692 u8 *buf;
7693
Michael Chanbc1c7562006-03-20 17:48:03 -08007694 if (tp->link_config.phy_is_low_power)
7695 return -EAGAIN;
7696
Linus Torvalds1da177e2005-04-16 15:20:36 -07007697 if (eeprom->magic != TG3_EEPROM_MAGIC)
7698 return -EINVAL;
7699
7700 offset = eeprom->offset;
7701 len = eeprom->len;
7702
7703 if ((b_offset = (offset & 3))) {
7704 /* adjustments to start on required 4 byte boundary */
7705 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7706 if (ret)
7707 return ret;
7708 start = cpu_to_le32(start);
7709 len += b_offset;
7710 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07007711 if (len < 4)
7712 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007713 }
7714
7715 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07007716 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007717 /* adjustments to end on required 4 byte boundary */
7718 odd_len = 1;
7719 len = (len + 3) & ~3;
7720 ret = tg3_nvram_read(tp, offset+len-4, &end);
7721 if (ret)
7722 return ret;
7723 end = cpu_to_le32(end);
7724 }
7725
7726 buf = data;
7727 if (b_offset || odd_len) {
7728 buf = kmalloc(len, GFP_KERNEL);
7729		if (!buf)
7730 return -ENOMEM;
7731 if (b_offset)
7732 memcpy(buf, &start, 4);
7733 if (odd_len)
7734 memcpy(buf+len-4, &end, 4);
7735 memcpy(buf + b_offset, data, eeprom->len);
7736 }
7737
7738 ret = tg3_nvram_write_block(tp, offset, len, buf);
7739
7740 if (buf != data)
7741 kfree(buf);
7742
7743 return ret;
7744}
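/* tg3_set_eeprom() only ever hands tg3_nvram_write_block() whole, aligned
 * words: a request that does not start or end on a 4-byte boundary is
 * widened, the partially covered words are read back from NVRAM, and the
 * caller's bytes are merged over them in a bounce buffer.  As a worked
 * example, offset=6/len=3 becomes an 8-byte write at offset 4: the words
 * at offsets 4 and 8 are read into start/end, copied to buffer offsets 0
 * and 4, and the caller's three bytes are then laid over buffer offsets
 * 2..4 before the merged block is written out.
 */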
7745
7746static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7747{
7748 struct tg3 *tp = netdev_priv(dev);
7749
7750 cmd->supported = (SUPPORTED_Autoneg);
7751
7752 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7753 cmd->supported |= (SUPPORTED_1000baseT_Half |
7754 SUPPORTED_1000baseT_Full);
7755
Karsten Keilef348142006-05-12 12:49:08 -07007756 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007757 cmd->supported |= (SUPPORTED_100baseT_Half |
7758 SUPPORTED_100baseT_Full |
7759 SUPPORTED_10baseT_Half |
7760 SUPPORTED_10baseT_Full |
7761 SUPPORTED_MII);
Karsten Keilef348142006-05-12 12:49:08 -07007762 cmd->port = PORT_TP;
7763 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007764 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07007765 cmd->port = PORT_FIBRE;
7766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007767
7768 cmd->advertising = tp->link_config.advertising;
7769 if (netif_running(dev)) {
7770 cmd->speed = tp->link_config.active_speed;
7771 cmd->duplex = tp->link_config.active_duplex;
7772 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007773 cmd->phy_address = PHY_ADDR;
7774 cmd->transceiver = 0;
7775 cmd->autoneg = tp->link_config.autoneg;
7776 cmd->maxtxpkt = 0;
7777 cmd->maxrxpkt = 0;
7778 return 0;
7779}
7780
7781static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7782{
7783 struct tg3 *tp = netdev_priv(dev);
7784
Michael Chan37ff2382005-10-26 15:49:51 -07007785 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007786 /* These are the only valid advertisement bits allowed. */
7787 if (cmd->autoneg == AUTONEG_ENABLE &&
7788 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7789 ADVERTISED_1000baseT_Full |
7790 ADVERTISED_Autoneg |
7791 ADVERTISED_FIBRE)))
7792 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07007793 /* Fiber can only do SPEED_1000. */
7794 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7795 (cmd->speed != SPEED_1000))
7796 return -EINVAL;
7797 /* Copper cannot force SPEED_1000. */
7798 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7799 (cmd->speed == SPEED_1000))
7800 return -EINVAL;
7801 else if ((cmd->speed == SPEED_1000) &&
7802		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7803 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007804
David S. Millerf47c11e2005-06-24 20:18:35 -07007805 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007806
7807 tp->link_config.autoneg = cmd->autoneg;
7808 if (cmd->autoneg == AUTONEG_ENABLE) {
7809 tp->link_config.advertising = cmd->advertising;
7810 tp->link_config.speed = SPEED_INVALID;
7811 tp->link_config.duplex = DUPLEX_INVALID;
7812 } else {
7813 tp->link_config.advertising = 0;
7814 tp->link_config.speed = cmd->speed;
7815 tp->link_config.duplex = cmd->duplex;
7816 }
7817
7818 if (netif_running(dev))
7819 tg3_setup_phy(tp, 1);
7820
David S. Millerf47c11e2005-06-24 20:18:35 -07007821 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007822
7823 return 0;
7824}
7825
7826static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7827{
7828 struct tg3 *tp = netdev_priv(dev);
7829
7830 strcpy(info->driver, DRV_MODULE_NAME);
7831 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08007832 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007833 strcpy(info->bus_info, pci_name(tp->pdev));
7834}
7835
7836static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7837{
7838 struct tg3 *tp = netdev_priv(dev);
7839
7840 wol->supported = WAKE_MAGIC;
7841 wol->wolopts = 0;
7842 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7843 wol->wolopts = WAKE_MAGIC;
7844 memset(&wol->sopass, 0, sizeof(wol->sopass));
7845}
7846
7847static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7848{
7849 struct tg3 *tp = netdev_priv(dev);
7850
7851 if (wol->wolopts & ~WAKE_MAGIC)
7852 return -EINVAL;
7853 if ((wol->wolopts & WAKE_MAGIC) &&
7854 tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7855 !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7856 return -EINVAL;
7857
David S. Millerf47c11e2005-06-24 20:18:35 -07007858 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007859 if (wol->wolopts & WAKE_MAGIC)
7860 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7861 else
7862 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
David S. Millerf47c11e2005-06-24 20:18:35 -07007863 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007864
7865 return 0;
7866}
7867
7868static u32 tg3_get_msglevel(struct net_device *dev)
7869{
7870 struct tg3 *tp = netdev_priv(dev);
7871 return tp->msg_enable;
7872}
7873
7874static void tg3_set_msglevel(struct net_device *dev, u32 value)
7875{
7876 struct tg3 *tp = netdev_priv(dev);
7877 tp->msg_enable = value;
7878}
7879
7880#if TG3_TSO_SUPPORT != 0
7881static int tg3_set_tso(struct net_device *dev, u32 value)
7882{
7883 struct tg3 *tp = netdev_priv(dev);
7884
7885 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7886 if (value)
7887 return -EINVAL;
7888 return 0;
7889 }
7890 return ethtool_op_set_tso(dev, value);
7891}
7892#endif
7893
7894static int tg3_nway_reset(struct net_device *dev)
7895{
7896 struct tg3 *tp = netdev_priv(dev);
7897 u32 bmcr;
7898 int r;
7899
7900 if (!netif_running(dev))
7901 return -EAGAIN;
7902
Michael Chanc94e3942005-09-27 12:12:42 -07007903 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7904 return -EINVAL;
7905
David S. Millerf47c11e2005-06-24 20:18:35 -07007906 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007907 r = -EINVAL;
7908 tg3_readphy(tp, MII_BMCR, &bmcr);
7909 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
Michael Chanc94e3942005-09-27 12:12:42 -07007910 ((bmcr & BMCR_ANENABLE) ||
7911 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7912 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7913 BMCR_ANENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007914 r = 0;
7915 }
David S. Millerf47c11e2005-06-24 20:18:35 -07007916 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007917
7918 return r;
7919}
7920
7921static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7922{
7923 struct tg3 *tp = netdev_priv(dev);
7924
7925 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7926 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08007927 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7928 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7929 else
7930 ering->rx_jumbo_max_pending = 0;
7931
7932 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007933
7934 ering->rx_pending = tp->rx_pending;
7935 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08007936 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7937 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7938 else
7939 ering->rx_jumbo_pending = 0;
7940
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941 ering->tx_pending = tp->tx_pending;
7942}
7943
7944static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7945{
7946 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007947 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007948
7949 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7950 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7951 (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7952 return -EINVAL;
7953
Michael Chanbbe832c2005-06-24 20:20:04 -07007954 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007955 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007956 irq_sync = 1;
7957 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007958
Michael Chanbbe832c2005-06-24 20:20:04 -07007959 tg3_full_lock(tp, irq_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007960
7961 tp->rx_pending = ering->rx_pending;
7962
7963 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7964 tp->rx_pending > 63)
7965 tp->rx_pending = 63;
7966 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7967 tp->tx_pending = ering->tx_pending;
7968
7969 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07007970 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007971 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007972 tg3_netif_start(tp);
7973 }
7974
David S. Millerf47c11e2005-06-24 20:18:35 -07007975 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007976
7977 return 0;
7978}
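/* New ring sizes cannot be applied to a live chip, so when the interface
 * is up the driver stops the net queues, fully resets the hardware and
 * re-initializes it with the new rx/tx pending counts (chips flagged
 * TG3_FLG2_MAX_RXPEND_64 are additionally capped at 63 standard rx
 * buffers).  From user space this path is reached through ethtool, e.g.
 * something like "ethtool -G eth0 rx 200 tx 511".
 */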
7979
7980static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7981{
7982 struct tg3 *tp = netdev_priv(dev);
7983
7984 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7985 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7986 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7987}
7988
7989static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7990{
7991 struct tg3 *tp = netdev_priv(dev);
Michael Chanbbe832c2005-06-24 20:20:04 -07007992 int irq_sync = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007993
Michael Chanbbe832c2005-06-24 20:20:04 -07007994 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007995 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07007996 irq_sync = 1;
7997 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007998
Michael Chanbbe832c2005-06-24 20:20:04 -07007999 tg3_full_lock(tp, irq_sync);
David S. Millerf47c11e2005-06-24 20:18:35 -07008000
Linus Torvalds1da177e2005-04-16 15:20:36 -07008001 if (epause->autoneg)
8002 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8003 else
8004 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8005 if (epause->rx_pause)
8006 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8007 else
8008 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8009 if (epause->tx_pause)
8010 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8011 else
8012 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8013
8014 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07008015 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008016 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008017 tg3_netif_start(tp);
8018 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008019
8020 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008021
8022 return 0;
8023}
8024
8025static u32 tg3_get_rx_csum(struct net_device *dev)
8026{
8027 struct tg3 *tp = netdev_priv(dev);
8028 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8029}
8030
8031static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8032{
8033 struct tg3 *tp = netdev_priv(dev);
8034
8035 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8036 if (data != 0)
8037 return -EINVAL;
8038 return 0;
8039 }
8040
David S. Millerf47c11e2005-06-24 20:18:35 -07008041 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008042 if (data)
8043 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8044 else
8045 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07008046 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008047
8048 return 0;
8049}
8050
8051static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8052{
8053 struct tg3 *tp = netdev_priv(dev);
8054
8055 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8056 if (data != 0)
8057 return -EINVAL;
8058 return 0;
8059 }
8060
Michael Chanaf36e6b2006-03-23 01:28:06 -08008061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
Michael Chan9c27dbd2006-03-20 22:28:27 -08008063 ethtool_op_set_tx_hw_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008064 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08008065 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008066
8067 return 0;
8068}
8069
8070static int tg3_get_stats_count (struct net_device *dev)
8071{
8072 return TG3_NUM_STATS;
8073}
8074
Michael Chan4cafd3f2005-05-29 14:56:34 -07008075static int tg3_get_test_count (struct net_device *dev)
8076{
8077 return TG3_NUM_TEST;
8078}
8079
Linus Torvalds1da177e2005-04-16 15:20:36 -07008080static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8081{
8082 switch (stringset) {
8083 case ETH_SS_STATS:
8084 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8085 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07008086 case ETH_SS_TEST:
8087 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8088 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008089 default:
8090 WARN_ON(1); /* we need a WARN() */
8091 break;
8092 }
8093}
8094
Michael Chan4009a932005-09-05 17:52:54 -07008095static int tg3_phys_id(struct net_device *dev, u32 data)
8096{
8097 struct tg3 *tp = netdev_priv(dev);
8098 int i;
8099
8100 if (!netif_running(tp->dev))
8101 return -EAGAIN;
8102
8103 if (data == 0)
8104 data = 2;
8105
8106 for (i = 0; i < (data * 2); i++) {
8107 if ((i % 2) == 0)
8108 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8109 LED_CTRL_1000MBPS_ON |
8110 LED_CTRL_100MBPS_ON |
8111 LED_CTRL_10MBPS_ON |
8112 LED_CTRL_TRAFFIC_OVERRIDE |
8113 LED_CTRL_TRAFFIC_BLINK |
8114 LED_CTRL_TRAFFIC_LED);
8115
8116 else
8117 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8118 LED_CTRL_TRAFFIC_OVERRIDE);
8119
8120 if (msleep_interruptible(500))
8121 break;
8122 }
8123 tw32(MAC_LED_CTRL, tp->led_ctrl);
8124 return 0;
8125}
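/* tg3_phys_id() implements "ethtool -p": it overrides the LED control
 * register for roughly 'data' seconds (two seconds if 0 is passed),
 * alternating every 500 ms between all link/speed/traffic LEDs forced on
 * and all forced off, then restores the saved tp->led_ctrl value so the
 * LEDs return to normal operation.
 */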
8126
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127static void tg3_get_ethtool_stats (struct net_device *dev,
8128 struct ethtool_stats *estats, u64 *tmp_stats)
8129{
8130 struct tg3 *tp = netdev_priv(dev);
8131 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8132}
8133
Michael Chan566f86a2005-05-29 14:56:58 -07008134#define NVRAM_TEST_SIZE 0x100
Michael Chan1b277772006-03-20 22:27:48 -08008135#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
Michael Chan566f86a2005-05-29 14:56:58 -07008136
8137static int tg3_test_nvram(struct tg3 *tp)
8138{
Michael Chan1b277772006-03-20 22:27:48 -08008139 u32 *buf, csum, magic;
8140 int i, j, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07008141
Michael Chan18201802006-03-20 22:29:15 -08008142 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08008143 return -EIO;
8144
Michael Chan1b277772006-03-20 22:27:48 -08008145 if (magic == TG3_EEPROM_MAGIC)
8146 size = NVRAM_TEST_SIZE;
8147 else if ((magic & 0xff000000) == 0xa5000000) {
8148 if ((magic & 0xe00000) == 0x200000)
8149 size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8150 else
8151 return 0;
8152 } else
8153 return -EIO;
8154
8155 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07008156 if (buf == NULL)
8157 return -ENOMEM;
8158
Michael Chan1b277772006-03-20 22:27:48 -08008159 err = -EIO;
8160 for (i = 0, j = 0; i < size; i += 4, j++) {
Michael Chan566f86a2005-05-29 14:56:58 -07008161 u32 val;
8162
8163 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8164 break;
8165 buf[j] = cpu_to_le32(val);
8166 }
Michael Chan1b277772006-03-20 22:27:48 -08008167 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07008168 goto out;
8169
Michael Chan1b277772006-03-20 22:27:48 -08008170 /* Selfboot format */
8171 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8172 u8 *buf8 = (u8 *) buf, csum8 = 0;
8173
8174 for (i = 0; i < size; i++)
8175 csum8 += buf8[i];
8176
Adrian Bunkad96b482006-04-05 22:21:04 -07008177 if (csum8 == 0) {
8178 err = 0;
8179 goto out;
8180 }
8181
8182 err = -EIO;
8183 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08008184 }
Michael Chan566f86a2005-05-29 14:56:58 -07008185
8186 /* Bootstrap checksum at offset 0x10 */
8187 csum = calc_crc((unsigned char *) buf, 0x10);
8188	if (csum != cpu_to_le32(buf[0x10/4]))
8189 goto out;
8190
8191 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8192 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8193 if (csum != cpu_to_le32(buf[0xfc/4]))
8194 goto out;
8195
8196 err = 0;
8197
8198out:
8199 kfree(buf);
8200 return err;
8201}
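/* Layout assumed by the NVRAM self test above: a standard image starts
 * with TG3_EEPROM_MAGIC and carries two CRCs - one at offset 0x10
 * covering bytes 0x00-0x0f (the bootstrap header) and one at offset 0xfc
 * covering the 0x88-byte manufacturing block that starts at 0x74.  A
 * selfboot image (magic 0xa5xxxxxx with (magic & 0xe00000) == 0x200000)
 * is instead validated by summing all 0x14 of its bytes, which must come
 * to zero modulo 256.
 */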
8202
Michael Chanca430072005-05-29 14:57:23 -07008203#define TG3_SERDES_TIMEOUT_SEC 2
8204#define TG3_COPPER_TIMEOUT_SEC 6
8205
8206static int tg3_test_link(struct tg3 *tp)
8207{
8208 int i, max;
8209
8210 if (!netif_running(tp->dev))
8211 return -ENODEV;
8212
Michael Chan4c987482005-09-05 17:52:38 -07008213 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07008214 max = TG3_SERDES_TIMEOUT_SEC;
8215 else
8216 max = TG3_COPPER_TIMEOUT_SEC;
8217
8218 for (i = 0; i < max; i++) {
8219 if (netif_carrier_ok(tp->dev))
8220 return 0;
8221
8222 if (msleep_interruptible(1000))
8223 break;
8224 }
8225
8226 return -EIO;
8227}
8228
Michael Chana71116d2005-05-29 14:58:11 -07008229/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08008230static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07008231{
8232 int i, is_5705;
8233 u32 offset, read_mask, write_mask, val, save_val, read_val;
8234 static struct {
8235 u16 offset;
8236 u16 flags;
8237#define TG3_FL_5705 0x1
8238#define TG3_FL_NOT_5705 0x2
8239#define TG3_FL_NOT_5788 0x4
8240 u32 read_mask;
8241 u32 write_mask;
8242 } reg_tbl[] = {
8243 /* MAC Control Registers */
8244 { MAC_MODE, TG3_FL_NOT_5705,
8245 0x00000000, 0x00ef6f8c },
8246 { MAC_MODE, TG3_FL_5705,
8247 0x00000000, 0x01ef6b8c },
8248 { MAC_STATUS, TG3_FL_NOT_5705,
8249 0x03800107, 0x00000000 },
8250 { MAC_STATUS, TG3_FL_5705,
8251 0x03800100, 0x00000000 },
8252 { MAC_ADDR_0_HIGH, 0x0000,
8253 0x00000000, 0x0000ffff },
8254 { MAC_ADDR_0_LOW, 0x0000,
8255 0x00000000, 0xffffffff },
8256 { MAC_RX_MTU_SIZE, 0x0000,
8257 0x00000000, 0x0000ffff },
8258 { MAC_TX_MODE, 0x0000,
8259 0x00000000, 0x00000070 },
8260 { MAC_TX_LENGTHS, 0x0000,
8261 0x00000000, 0x00003fff },
8262 { MAC_RX_MODE, TG3_FL_NOT_5705,
8263 0x00000000, 0x000007fc },
8264 { MAC_RX_MODE, TG3_FL_5705,
8265 0x00000000, 0x000007dc },
8266 { MAC_HASH_REG_0, 0x0000,
8267 0x00000000, 0xffffffff },
8268 { MAC_HASH_REG_1, 0x0000,
8269 0x00000000, 0xffffffff },
8270 { MAC_HASH_REG_2, 0x0000,
8271 0x00000000, 0xffffffff },
8272 { MAC_HASH_REG_3, 0x0000,
8273 0x00000000, 0xffffffff },
8274
8275 /* Receive Data and Receive BD Initiator Control Registers. */
8276 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8277 0x00000000, 0xffffffff },
8278 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8279 0x00000000, 0xffffffff },
8280 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8281 0x00000000, 0x00000003 },
8282 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8283 0x00000000, 0xffffffff },
8284 { RCVDBDI_STD_BD+0, 0x0000,
8285 0x00000000, 0xffffffff },
8286 { RCVDBDI_STD_BD+4, 0x0000,
8287 0x00000000, 0xffffffff },
8288 { RCVDBDI_STD_BD+8, 0x0000,
8289 0x00000000, 0xffff0002 },
8290 { RCVDBDI_STD_BD+0xc, 0x0000,
8291 0x00000000, 0xffffffff },
8292
8293 /* Receive BD Initiator Control Registers. */
8294 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8295 0x00000000, 0xffffffff },
8296 { RCVBDI_STD_THRESH, TG3_FL_5705,
8297 0x00000000, 0x000003ff },
8298 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8299 0x00000000, 0xffffffff },
8300
8301 /* Host Coalescing Control Registers. */
8302 { HOSTCC_MODE, TG3_FL_NOT_5705,
8303 0x00000000, 0x00000004 },
8304 { HOSTCC_MODE, TG3_FL_5705,
8305 0x00000000, 0x000000f6 },
8306 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8307 0x00000000, 0xffffffff },
8308 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8309 0x00000000, 0x000003ff },
8310 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8311 0x00000000, 0xffffffff },
8312 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8313 0x00000000, 0x000003ff },
8314 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8315 0x00000000, 0xffffffff },
8316 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8317 0x00000000, 0x000000ff },
8318 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8319 0x00000000, 0xffffffff },
8320 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8321 0x00000000, 0x000000ff },
8322 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8323 0x00000000, 0xffffffff },
8324 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8325 0x00000000, 0xffffffff },
8326 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8327 0x00000000, 0xffffffff },
8328 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8329 0x00000000, 0x000000ff },
8330 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8331 0x00000000, 0xffffffff },
8332 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8333 0x00000000, 0x000000ff },
8334 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8335 0x00000000, 0xffffffff },
8336 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8337 0x00000000, 0xffffffff },
8338 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8339 0x00000000, 0xffffffff },
8340 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8341 0x00000000, 0xffffffff },
8342 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8343 0x00000000, 0xffffffff },
8344 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8345 0xffffffff, 0x00000000 },
8346 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8347 0xffffffff, 0x00000000 },
8348
8349 /* Buffer Manager Control Registers. */
8350 { BUFMGR_MB_POOL_ADDR, 0x0000,
8351 0x00000000, 0x007fff80 },
8352 { BUFMGR_MB_POOL_SIZE, 0x0000,
8353 0x00000000, 0x007fffff },
8354 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8355 0x00000000, 0x0000003f },
8356 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8357 0x00000000, 0x000001ff },
8358 { BUFMGR_MB_HIGH_WATER, 0x0000,
8359 0x00000000, 0x000001ff },
8360 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8361 0xffffffff, 0x00000000 },
8362 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8363 0xffffffff, 0x00000000 },
8364
8365 /* Mailbox Registers */
8366 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8367 0x00000000, 0x000001ff },
8368 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8369 0x00000000, 0x000001ff },
8370 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8371 0x00000000, 0x000007ff },
8372 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8373 0x00000000, 0x000001ff },
8374
8375 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8376 };
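	/* How to read a reg_tbl[] entry: read_mask marks bits that must keep
	 * their saved value and ignore writes, write_mask marks bits that
	 * must accept both all-zeros and all-ones; the loop below checks
	 * both after writing 0 and after writing read_mask | write_mask.
	 * For example, { MAC_STATUS, TG3_FL_5705, 0x03800100, 0x00000000 }
	 * says that on 5705-class chips bits 0x03800100 of MAC_STATUS are
	 * read-only and no bits are expected to be host-writable.
	 */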
8377
8378 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8379 is_5705 = 1;
8380 else
8381 is_5705 = 0;
8382
8383 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8384 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8385 continue;
8386
8387 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8388 continue;
8389
8390 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8391 (reg_tbl[i].flags & TG3_FL_NOT_5788))
8392 continue;
8393
8394 offset = (u32) reg_tbl[i].offset;
8395 read_mask = reg_tbl[i].read_mask;
8396 write_mask = reg_tbl[i].write_mask;
8397
8398 /* Save the original register content */
8399 save_val = tr32(offset);
8400
8401 /* Determine the read-only value. */
8402 read_val = save_val & read_mask;
8403
8404 /* Write zero to the register, then make sure the read-only bits
8405 * are not changed and the read/write bits are all zeros.
8406 */
8407 tw32(offset, 0);
8408
8409 val = tr32(offset);
8410
8411 /* Test the read-only and read/write bits. */
8412 if (((val & read_mask) != read_val) || (val & write_mask))
8413 goto out;
8414
8415 /* Write ones to all the bits defined by RdMask and WrMask, then
8416 * make sure the read-only bits are not changed and the
8417 * read/write bits are all ones.
8418 */
8419 tw32(offset, read_mask | write_mask);
8420
8421 val = tr32(offset);
8422
8423 /* Test the read-only bits. */
8424 if ((val & read_mask) != read_val)
8425 goto out;
8426
8427 /* Test the read/write bits. */
8428 if ((val & write_mask) != write_mask)
8429 goto out;
8430
8431 tw32(offset, save_val);
8432 }
8433
8434 return 0;
8435
8436out:
8437 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8438 tw32(offset, save_val);
8439 return -EIO;
8440}
8441
Michael Chan7942e1d2005-05-29 14:58:36 -07008442static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8443{
Arjan van de Venf71e1302006-03-03 21:33:57 -05008444 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07008445 int i;
8446 u32 j;
8447
8448 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8449 for (j = 0; j < len; j += 4) {
8450 u32 val;
8451
8452 tg3_write_mem(tp, offset + j, test_pattern[i]);
8453 tg3_read_mem(tp, offset + j, &val);
8454 if (val != test_pattern[i])
8455 return -EIO;
8456 }
8457 }
8458 return 0;
8459}
8460
8461static int tg3_test_memory(struct tg3 *tp)
8462{
8463 static struct mem_entry {
8464 u32 offset;
8465 u32 len;
8466 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08008467 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07008468 { 0x00002000, 0x1c000},
8469 { 0xffffffff, 0x00000}
8470 }, mem_tbl_5705[] = {
8471 { 0x00000100, 0x0000c},
8472 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07008473 { 0x00004000, 0x00800},
8474 { 0x00006000, 0x01000},
8475 { 0x00008000, 0x02000},
8476 { 0x00010000, 0x0e000},
8477 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08008478 }, mem_tbl_5755[] = {
8479 { 0x00000200, 0x00008},
8480 { 0x00004000, 0x00800},
8481 { 0x00006000, 0x00800},
8482 { 0x00008000, 0x02000},
8483 { 0x00010000, 0x0c000},
8484 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07008485 };
8486 struct mem_entry *mem_tbl;
8487 int err = 0;
8488 int i;
8489
Michael Chan79f4d132006-03-20 22:28:57 -08008490 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08008491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
Michael Chan79f4d132006-03-20 22:28:57 -08008493 mem_tbl = mem_tbl_5755;
8494 else
8495 mem_tbl = mem_tbl_5705;
8496 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07008497 mem_tbl = mem_tbl_570x;
8498
8499 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8500 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8501 mem_tbl[i].len)) != 0)
8502 break;
8503 }
8504
8505 return err;
8506}
8507
Michael Chan9f40dea2005-09-05 17:53:06 -07008508#define TG3_MAC_LOOPBACK 0
8509#define TG3_PHY_LOOPBACK 1
8510
8511static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07008512{
Michael Chan9f40dea2005-09-05 17:53:06 -07008513 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07008514 u32 desc_idx;
8515 struct sk_buff *skb, *rx_skb;
8516 u8 *tx_data;
8517 dma_addr_t map;
8518 int num_pkts, tx_len, rx_len, i, err;
8519 struct tg3_rx_buffer_desc *desc;
8520
Michael Chan9f40dea2005-09-05 17:53:06 -07008521 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07008522 /* HW errata - mac loopback fails in some cases on 5780.
8523 * Normal traffic and PHY loopback are not affected by
8524 * errata.
8525 */
8526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8527 return 0;
8528
Michael Chan9f40dea2005-09-05 17:53:06 -07008529 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8530 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8531 MAC_MODE_PORT_MODE_GMII;
8532 tw32(MAC_MODE, mac_mode);
8533 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07008534 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8535 BMCR_SPEED1000);
8536 udelay(40);
8537 /* reset to prevent losing 1st rx packet intermittently */
8538 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8539 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8540 udelay(10);
8541 tw32_f(MAC_RX_MODE, tp->rx_mode);
8542 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008543 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8544 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
Michael Chanff18ff02006-03-27 23:17:27 -08008545 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
Michael Chan9f40dea2005-09-05 17:53:06 -07008546 mac_mode &= ~MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -08008547 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8548 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8549 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008550 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -07008551 }
8552 else
8553 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -07008554
8555 err = -EIO;
8556
Michael Chanc76949a2005-05-29 14:58:59 -07008557 tx_len = 1514;
8558 skb = dev_alloc_skb(tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -07008559 if (!skb)
8560 return -ENOMEM;
8561
Michael Chanc76949a2005-05-29 14:58:59 -07008562 tx_data = skb_put(skb, tx_len);
8563 memcpy(tx_data, tp->dev->dev_addr, 6);
8564 memset(tx_data + 6, 0x0, 8);
8565
8566 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8567
8568 for (i = 14; i < tx_len; i++)
8569 tx_data[i] = (u8) (i & 0xff);
8570
8571 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8572
8573 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8574 HOSTCC_MODE_NOW);
8575
8576 udelay(10);
8577
8578 rx_start_idx = tp->hw_status->idx[0].rx_producer;
8579
Michael Chanc76949a2005-05-29 14:58:59 -07008580 num_pkts = 0;
8581
Michael Chan9f40dea2005-09-05 17:53:06 -07008582 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -07008583
Michael Chan9f40dea2005-09-05 17:53:06 -07008584 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -07008585 num_pkts++;
8586
Michael Chan9f40dea2005-09-05 17:53:06 -07008587 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8588 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -07008589 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -07008590
8591 udelay(10);
8592
8593 for (i = 0; i < 10; i++) {
8594 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8595 HOSTCC_MODE_NOW);
8596
8597 udelay(10);
8598
8599 tx_idx = tp->hw_status->idx[0].tx_consumer;
8600 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -07008601 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -07008602 (rx_idx == (rx_start_idx + num_pkts)))
8603 break;
8604 }
8605
8606 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8607 dev_kfree_skb(skb);
8608
Michael Chan9f40dea2005-09-05 17:53:06 -07008609 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -07008610 goto out;
8611
8612 if (rx_idx != rx_start_idx + num_pkts)
8613 goto out;
8614
8615 desc = &tp->rx_rcb[rx_start_idx];
8616 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8617 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8618 if (opaque_key != RXD_OPAQUE_RING_STD)
8619 goto out;
8620
8621 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8622 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8623 goto out;
8624
8625 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8626 if (rx_len != tx_len)
8627 goto out;
8628
8629 rx_skb = tp->rx_std_buffers[desc_idx].skb;
8630
8631 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8632 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8633
8634 for (i = 14; i < tx_len; i++) {
8635 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8636 goto out;
8637 }
8638 err = 0;
8639
8640 /* tg3_free_rings will unmap and free the rx_skb */
8641out:
8642 return err;
8643}
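/* Summary of the loopback test above: a 1514-byte frame addressed to the
 * NIC's own MAC with an incrementing byte pattern in the payload is DMA
 * mapped, posted on the send ring and kicked via the mailbox; the host
 * coalescing block is then polled briefly until the tx consumer and rx
 * producer indices show the frame was transmitted and looped back, and
 * the received buffer is compared byte for byte against the pattern.
 * TG3_MAC_LOOPBACK wraps the frame internally in the MAC, while
 * TG3_PHY_LOOPBACK forces BMCR_LOOPBACK at 1000 Mb/s full duplex.
 */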
8644
Michael Chan9f40dea2005-09-05 17:53:06 -07008645#define TG3_MAC_LOOPBACK_FAILED 1
8646#define TG3_PHY_LOOPBACK_FAILED 2
8647#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8648 TG3_PHY_LOOPBACK_FAILED)
8649
8650static int tg3_test_loopback(struct tg3 *tp)
8651{
8652 int err = 0;
8653
8654 if (!netif_running(tp->dev))
8655 return TG3_LOOPBACK_FAILED;
8656
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008657 tg3_reset_hw(tp, 1);
Michael Chan9f40dea2005-09-05 17:53:06 -07008658
8659 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8660 err |= TG3_MAC_LOOPBACK_FAILED;
8661 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8662 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8663 err |= TG3_PHY_LOOPBACK_FAILED;
8664 }
8665
8666 return err;
8667}
8668
Michael Chan4cafd3f2005-05-29 14:56:34 -07008669static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8670 u64 *data)
8671{
Michael Chan566f86a2005-05-29 14:56:58 -07008672 struct tg3 *tp = netdev_priv(dev);
8673
Michael Chanbc1c7562006-03-20 17:48:03 -08008674 if (tp->link_config.phy_is_low_power)
8675 tg3_set_power_state(tp, PCI_D0);
8676
Michael Chan566f86a2005-05-29 14:56:58 -07008677 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8678
8679 if (tg3_test_nvram(tp) != 0) {
8680 etest->flags |= ETH_TEST_FL_FAILED;
8681 data[0] = 1;
8682 }
Michael Chanca430072005-05-29 14:57:23 -07008683 if (tg3_test_link(tp) != 0) {
8684 etest->flags |= ETH_TEST_FL_FAILED;
8685 data[1] = 1;
8686 }
Michael Chana71116d2005-05-29 14:58:11 -07008687 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chanec41c7d2006-01-17 02:40:55 -08008688 int err, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -07008689
Michael Chanbbe832c2005-06-24 20:20:04 -07008690 if (netif_running(dev)) {
8691 tg3_netif_stop(tp);
8692 irq_sync = 1;
8693 }
8694
8695 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -07008696
8697 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -08008698 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008699 tg3_halt_cpu(tp, RX_CPU_BASE);
8700 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8701 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08008702 if (!err)
8703 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008704
Michael Chand9ab5ad2006-03-20 22:27:35 -08008705 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8706 tg3_phy_reset(tp);
8707
Michael Chana71116d2005-05-29 14:58:11 -07008708 if (tg3_test_registers(tp) != 0) {
8709 etest->flags |= ETH_TEST_FL_FAILED;
8710 data[2] = 1;
8711 }
Michael Chan7942e1d2005-05-29 14:58:36 -07008712 if (tg3_test_memory(tp) != 0) {
8713 etest->flags |= ETH_TEST_FL_FAILED;
8714 data[3] = 1;
8715 }
Michael Chan9f40dea2005-09-05 17:53:06 -07008716 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -07008717 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -07008718
David S. Millerf47c11e2005-06-24 20:18:35 -07008719 tg3_full_unlock(tp);
8720
Michael Chand4bc3922005-05-29 14:59:20 -07008721 if (tg3_test_interrupt(tp) != 0) {
8722 etest->flags |= ETH_TEST_FL_FAILED;
8723 data[5] = 1;
8724 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008725
8726 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -07008727
Michael Chana71116d2005-05-29 14:58:11 -07008728 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8729 if (netif_running(dev)) {
8730 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008731 tg3_init_hw(tp, 1);
Michael Chana71116d2005-05-29 14:58:11 -07008732 tg3_netif_start(tp);
8733 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008734
8735 tg3_full_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07008736 }
Michael Chanbc1c7562006-03-20 17:48:03 -08008737 if (tp->link_config.phy_is_low_power)
8738 tg3_set_power_state(tp, PCI_D3hot);
8739
Michael Chan4cafd3f2005-05-29 14:56:34 -07008740}
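/* Result slots filled in by tg3_self_test() (reached via "ethtool -t";
 * data[2]..data[5] are only exercised for an offline test):
 *
 *	data[0]	NVRAM checksum test
 *	data[1]	link test
 *	data[2]	register read/write test
 *	data[3]	internal memory test
 *	data[4]	loopback test (bitmask of TG3_MAC_LOOPBACK_FAILED and
 *		TG3_PHY_LOOPBACK_FAILED)
 *	data[5]	interrupt test
 */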
8741
Linus Torvalds1da177e2005-04-16 15:20:36 -07008742static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8743{
8744 struct mii_ioctl_data *data = if_mii(ifr);
8745 struct tg3 *tp = netdev_priv(dev);
8746 int err;
8747
8748 switch(cmd) {
8749 case SIOCGMIIPHY:
8750 data->phy_id = PHY_ADDR;
8751
8752 /* fallthru */
8753 case SIOCGMIIREG: {
8754 u32 mii_regval;
8755
8756 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8757 break; /* We have no PHY */
8758
Michael Chanbc1c7562006-03-20 17:48:03 -08008759 if (tp->link_config.phy_is_low_power)
8760 return -EAGAIN;
8761
David S. Millerf47c11e2005-06-24 20:18:35 -07008762 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008763 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -07008764 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008765
8766 data->val_out = mii_regval;
8767
8768 return err;
8769 }
8770
8771 case SIOCSMIIREG:
8772 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8773 break; /* We have no PHY */
8774
8775 if (!capable(CAP_NET_ADMIN))
8776 return -EPERM;
8777
Michael Chanbc1c7562006-03-20 17:48:03 -08008778 if (tp->link_config.phy_is_low_power)
8779 return -EAGAIN;
8780
David S. Millerf47c11e2005-06-24 20:18:35 -07008781 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008782 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -07008783 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008784
8785 return err;
8786
8787 default:
8788 /* do nothing */
8789 break;
8790 }
8791 return -EOPNOTSUPP;
8792}
8793
8794#if TG3_VLAN_TAG_USED
8795static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8796{
8797 struct tg3 *tp = netdev_priv(dev);
8798
Michael Chan29315e82006-06-29 20:12:30 -07008799 if (netif_running(dev))
8800 tg3_netif_stop(tp);
8801
David S. Millerf47c11e2005-06-24 20:18:35 -07008802 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008803
8804 tp->vlgrp = grp;
8805
8806 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8807 __tg3_set_rx_mode(dev);
8808
David S. Millerf47c11e2005-06-24 20:18:35 -07008809 tg3_full_unlock(tp);
Michael Chan29315e82006-06-29 20:12:30 -07008810
8811 if (netif_running(dev))
8812 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008813}
8814
8815static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8816{
8817 struct tg3 *tp = netdev_priv(dev);
8818
Michael Chan29315e82006-06-29 20:12:30 -07008819 if (netif_running(dev))
8820 tg3_netif_stop(tp);
8821
David S. Millerf47c11e2005-06-24 20:18:35 -07008822 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008823 if (tp->vlgrp)
8824 tp->vlgrp->vlan_devices[vid] = NULL;
David S. Millerf47c11e2005-06-24 20:18:35 -07008825 tg3_full_unlock(tp);
Michael Chan29315e82006-06-29 20:12:30 -07008826
8827 if (netif_running(dev))
8828 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008829}
8830#endif
8831
David S. Miller15f98502005-05-18 22:49:26 -07008832static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8833{
8834 struct tg3 *tp = netdev_priv(dev);
8835
8836 memcpy(ec, &tp->coal, sizeof(*ec));
8837 return 0;
8838}
8839
Michael Chand244c892005-07-05 14:42:33 -07008840static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8841{
8842 struct tg3 *tp = netdev_priv(dev);
8843 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8844 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8845
8846 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8847 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8848 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8849 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8850 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8851 }
8852
8853 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8854 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8855 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8856 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8857 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8858 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8859 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8860 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8861 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8862 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8863 return -EINVAL;
8864
8865 /* No rx interrupts will be generated if both are zero */
8866 if ((ec->rx_coalesce_usecs == 0) &&
8867 (ec->rx_max_coalesced_frames == 0))
8868 return -EINVAL;
8869
8870 /* No tx interrupts will be generated if both are zero */
8871 if ((ec->tx_coalesce_usecs == 0) &&
8872 (ec->tx_max_coalesced_frames == 0))
8873 return -EINVAL;
8874
8875 /* Only copy relevant parameters, ignore all others. */
8876 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8877 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8878 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8879 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8880 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8881 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8882 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8883 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8884 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8885
8886 if (netif_running(dev)) {
8887 tg3_full_lock(tp, 0);
8888 __tg3_set_coalesce(tp, &tp->coal);
8889 tg3_full_unlock(tp);
8890 }
8891 return 0;
8892}
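/* Notes on the coalescing limits above: rx and tx cannot both have their
 * usecs and max-frames parameters set to zero (that would disable the
 * corresponding interrupts entirely), and on 5705-and-newer chips the
 * irq tick limits (rx/tx_coalesce_usecs_irq) and the statistics block
 * interval are forced to zero, so only the basic rx/tx usecs and frame
 * counts can be tuned there.  Typical user-space usage is something like
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5".
 */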
8893
Linus Torvalds1da177e2005-04-16 15:20:36 -07008894static struct ethtool_ops tg3_ethtool_ops = {
8895 .get_settings = tg3_get_settings,
8896 .set_settings = tg3_set_settings,
8897 .get_drvinfo = tg3_get_drvinfo,
8898 .get_regs_len = tg3_get_regs_len,
8899 .get_regs = tg3_get_regs,
8900 .get_wol = tg3_get_wol,
8901 .set_wol = tg3_set_wol,
8902 .get_msglevel = tg3_get_msglevel,
8903 .set_msglevel = tg3_set_msglevel,
8904 .nway_reset = tg3_nway_reset,
8905 .get_link = ethtool_op_get_link,
8906 .get_eeprom_len = tg3_get_eeprom_len,
8907 .get_eeprom = tg3_get_eeprom,
8908 .set_eeprom = tg3_set_eeprom,
8909 .get_ringparam = tg3_get_ringparam,
8910 .set_ringparam = tg3_set_ringparam,
8911 .get_pauseparam = tg3_get_pauseparam,
8912 .set_pauseparam = tg3_set_pauseparam,
8913 .get_rx_csum = tg3_get_rx_csum,
8914 .set_rx_csum = tg3_set_rx_csum,
8915 .get_tx_csum = ethtool_op_get_tx_csum,
8916 .set_tx_csum = tg3_set_tx_csum,
8917 .get_sg = ethtool_op_get_sg,
8918 .set_sg = ethtool_op_set_sg,
8919#if TG3_TSO_SUPPORT != 0
8920 .get_tso = ethtool_op_get_tso,
8921 .set_tso = tg3_set_tso,
8922#endif
Michael Chan4cafd3f2005-05-29 14:56:34 -07008923 .self_test_count = tg3_get_test_count,
8924 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008925 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -07008926 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008927 .get_stats_count = tg3_get_stats_count,
8928 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -07008929 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -07008930 .set_coalesce = tg3_set_coalesce,
John W. Linville2ff43692005-09-12 14:44:20 -07008931 .get_perm_addr = ethtool_op_get_perm_addr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008932};
8933
8934static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8935{
Michael Chan1b277772006-03-20 22:27:48 -08008936 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008937
8938 tp->nvram_size = EEPROM_CHIP_SIZE;
8939
Michael Chan18201802006-03-20 22:29:15 -08008940 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008941 return;
8942
Michael Chan1b277772006-03-20 22:27:48 -08008943 if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008944 return;
8945
8946 /*
8947 * Size the chip by reading offsets at increasing powers of two.
8948 * When we encounter our validation signature, we know the addressing
8949 * has wrapped around, and thus have our chip size.
8950 */
Michael Chan1b277772006-03-20 22:27:48 -08008951 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008952
8953 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -08008954 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008955 return;
8956
Michael Chan18201802006-03-20 22:29:15 -08008957 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008958 break;
8959
8960 cursize <<= 1;
8961 }
8962
8963 tp->nvram_size = cursize;
8964}
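/* Worked (hypothetical) example of the sizing loop above: on a 16 kB
 * serial EEPROM the words read back at offsets 0x10, 0x20, ... 0x2000
 * differ from the signature, but a read at 0x4000 wraps around to offset
 * 0 and returns the magic value again, so the loop stops with
 * tp->nvram_size = 0x4000.
 */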
8965
8966static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8967{
8968 u32 val;
8969
Michael Chan18201802006-03-20 22:29:15 -08008970 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08008971 return;
8972
8973 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -08008974 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -08008975 tg3_get_eeprom_size(tp);
8976 return;
8977 }
8978
Linus Torvalds1da177e2005-04-16 15:20:36 -07008979 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8980 if (val != 0) {
8981 tp->nvram_size = (val >> 16) * 1024;
8982 return;
8983 }
8984 }
8985 tp->nvram_size = 0x20000;
8986}
8987
8988static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8989{
8990 u32 nvcfg1;
8991
8992 nvcfg1 = tr32(NVRAM_CFG1);
8993 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8994 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8995 }
8996 else {
8997 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8998 tw32(NVRAM_CFG1, nvcfg1);
8999 }
9000
Michael Chan4c987482005-09-05 17:52:38 -07009001 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -07009002 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009003 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9004 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9005 tp->nvram_jedecnum = JEDEC_ATMEL;
9006 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9007 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9008 break;
9009 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9010 tp->nvram_jedecnum = JEDEC_ATMEL;
9011 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9012 break;
9013 case FLASH_VENDOR_ATMEL_EEPROM:
9014 tp->nvram_jedecnum = JEDEC_ATMEL;
9015 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9016 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9017 break;
9018 case FLASH_VENDOR_ST:
9019 tp->nvram_jedecnum = JEDEC_ST;
9020 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9021 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9022 break;
9023 case FLASH_VENDOR_SAIFUN:
9024 tp->nvram_jedecnum = JEDEC_SAIFUN;
9025 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9026 break;
9027 case FLASH_VENDOR_SST_SMALL:
9028 case FLASH_VENDOR_SST_LARGE:
9029 tp->nvram_jedecnum = JEDEC_SST;
9030 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9031 break;
9032 }
9033 }
9034 else {
9035 tp->nvram_jedecnum = JEDEC_ATMEL;
9036 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9037 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9038 }
9039}
9040
Michael Chan361b4ac2005-04-21 17:11:21 -07009041static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9042{
9043 u32 nvcfg1;
9044
9045 nvcfg1 = tr32(NVRAM_CFG1);
9046
Michael Chane6af3012005-04-21 17:12:05 -07009047 /* NVRAM protection for TPM */
9048 if (nvcfg1 & (1 << 27))
9049 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9050
Michael Chan361b4ac2005-04-21 17:11:21 -07009051 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9052 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9053 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9054 tp->nvram_jedecnum = JEDEC_ATMEL;
9055 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9056 break;
9057 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9058 tp->nvram_jedecnum = JEDEC_ATMEL;
9059 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9060 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9061 break;
9062 case FLASH_5752VENDOR_ST_M45PE10:
9063 case FLASH_5752VENDOR_ST_M45PE20:
9064 case FLASH_5752VENDOR_ST_M45PE40:
9065 tp->nvram_jedecnum = JEDEC_ST;
9066 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9067 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9068 break;
9069 }
9070
9071 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9072 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9073 case FLASH_5752PAGE_SIZE_256:
9074 tp->nvram_pagesize = 256;
9075 break;
9076 case FLASH_5752PAGE_SIZE_512:
9077 tp->nvram_pagesize = 512;
9078 break;
9079 case FLASH_5752PAGE_SIZE_1K:
9080 tp->nvram_pagesize = 1024;
9081 break;
9082 case FLASH_5752PAGE_SIZE_2K:
9083 tp->nvram_pagesize = 2048;
9084 break;
9085 case FLASH_5752PAGE_SIZE_4K:
9086 tp->nvram_pagesize = 4096;
9087 break;
9088 case FLASH_5752PAGE_SIZE_264:
9089 tp->nvram_pagesize = 264;
9090 break;
9091 }
9092 }
9093 else {
9094 /* For eeprom, set pagesize to maximum eeprom size */
9095 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9096
9097 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9098 tw32(NVRAM_CFG1, nvcfg1);
9099 }
9100}
9101
Michael Chand3c7b882006-03-23 01:28:25 -08009102static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9103{
9104 u32 nvcfg1;
9105
9106 nvcfg1 = tr32(NVRAM_CFG1);
9107
9108 /* NVRAM protection for TPM */
9109 if (nvcfg1 & (1 << 27))
9110 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9111
9112 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9113 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9114 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9115 tp->nvram_jedecnum = JEDEC_ATMEL;
9116 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9117 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9118
9119 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9120 tw32(NVRAM_CFG1, nvcfg1);
9121 break;
9122 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9123 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9124 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9125 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9126 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9127 tp->nvram_jedecnum = JEDEC_ATMEL;
9128 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9129 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9130 tp->nvram_pagesize = 264;
9131 break;
9132 case FLASH_5752VENDOR_ST_M45PE10:
9133 case FLASH_5752VENDOR_ST_M45PE20:
9134 case FLASH_5752VENDOR_ST_M45PE40:
9135 tp->nvram_jedecnum = JEDEC_ST;
9136 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9137 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9138 tp->nvram_pagesize = 256;
9139 break;
9140 }
9141}
9142
Michael Chan1b277772006-03-20 22:27:48 -08009143static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9144{
9145 u32 nvcfg1;
9146
9147 nvcfg1 = tr32(NVRAM_CFG1);
9148
9149 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9150 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9151 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9152 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9153 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9154 tp->nvram_jedecnum = JEDEC_ATMEL;
9155 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9156 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9157
9158 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9159 tw32(NVRAM_CFG1, nvcfg1);
9160 break;
9161 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9162 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9163 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9164 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9165 tp->nvram_jedecnum = JEDEC_ATMEL;
9166 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9167 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9168 tp->nvram_pagesize = 264;
9169 break;
9170 case FLASH_5752VENDOR_ST_M45PE10:
9171 case FLASH_5752VENDOR_ST_M45PE20:
9172 case FLASH_5752VENDOR_ST_M45PE40:
9173 tp->nvram_jedecnum = JEDEC_ST;
9174 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9175 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9176 tp->nvram_pagesize = 256;
9177 break;
9178 }
9179}
9180
Linus Torvalds1da177e2005-04-16 15:20:36 -07009181/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9182static void __devinit tg3_nvram_init(struct tg3 *tp)
9183{
9184 int j;
9185
Linus Torvalds1da177e2005-04-16 15:20:36 -07009186 tw32_f(GRC_EEPROM_ADDR,
9187 (EEPROM_ADDR_FSM_RESET |
9188 (EEPROM_DEFAULT_CLOCK_PERIOD <<
9189 EEPROM_ADDR_CLKPERD_SHIFT)));
9190
9191 /* XXX schedule_timeout() ... */
9192 for (j = 0; j < 100; j++)
9193 udelay(10);
9194
9195 /* Enable seeprom accesses. */
9196 tw32_f(GRC_LOCAL_CTRL,
9197 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9198 udelay(100);
9199
9200 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9201 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9202 tp->tg3_flags |= TG3_FLAG_NVRAM;
9203
Michael Chanec41c7d2006-01-17 02:40:55 -08009204 if (tg3_nvram_lock(tp)) {
9205			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9206 "tg3_nvram_init failed.\n", tp->dev->name);
9207 return;
9208 }
Michael Chane6af3012005-04-21 17:12:05 -07009209 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009210
Michael Chan361b4ac2005-04-21 17:11:21 -07009211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9212 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -08009213 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9214 tg3_get_5755_nvram_info(tp);
Michael Chan1b277772006-03-20 22:27:48 -08009215 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9216 tg3_get_5787_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -07009217 else
9218 tg3_get_nvram_info(tp);
9219
Linus Torvalds1da177e2005-04-16 15:20:36 -07009220 tg3_get_nvram_size(tp);
9221
Michael Chane6af3012005-04-21 17:12:05 -07009222 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -08009223 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009224
9225 } else {
9226 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9227
9228 tg3_get_eeprom_size(tp);
9229 }
9230}
9231
9232static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9233 u32 offset, u32 *val)
9234{
9235 u32 tmp;
9236 int i;
9237
9238 if (offset > EEPROM_ADDR_ADDR_MASK ||
9239 (offset % 4) != 0)
9240 return -EINVAL;
9241
9242 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9243 EEPROM_ADDR_DEVID_MASK |
9244 EEPROM_ADDR_READ);
9245 tw32(GRC_EEPROM_ADDR,
9246 tmp |
9247 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9248 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9249 EEPROM_ADDR_ADDR_MASK) |
9250 EEPROM_ADDR_READ | EEPROM_ADDR_START);
9251
9252 for (i = 0; i < 10000; i++) {
9253 tmp = tr32(GRC_EEPROM_ADDR);
9254
9255 if (tmp & EEPROM_ADDR_COMPLETE)
9256 break;
9257 udelay(100);
9258 }
9259 if (!(tmp & EEPROM_ADDR_COMPLETE))
9260 return -EBUSY;
9261
9262 *val = tr32(GRC_EEPROM_DATA);
9263 return 0;
9264}
9265
9266#define NVRAM_CMD_TIMEOUT 10000
9267
9268static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9269{
9270 int i;
9271
9272 tw32(NVRAM_CMD, nvram_cmd);
9273 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9274 udelay(10);
9275 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9276 udelay(10);
9277 break;
9278 }
9279 }
9280 if (i == NVRAM_CMD_TIMEOUT) {
9281 return -EBUSY;
9282 }
9283 return 0;
9284}
9285
Michael Chan18201802006-03-20 22:29:15 -08009286static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9287{
9288 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9289 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9290 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9291 (tp->nvram_jedecnum == JEDEC_ATMEL))
9292
9293 addr = ((addr / tp->nvram_pagesize) <<
9294 ATMEL_AT45DB0X1B_PAGE_POS) +
9295 (addr % tp->nvram_pagesize);
9296
9297 return addr;
9298}
9299
Michael Chanc4e65752006-03-20 22:29:32 -08009300static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9301{
9302 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9303 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9304 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9305 (tp->nvram_jedecnum == JEDEC_ATMEL))
9306
9307 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9308 tp->nvram_pagesize) +
9309 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9310
9311 return addr;
9312}
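/* Worked example of the two address mappings above for buffered Atmel
 * parts, assuming ATMEL_AT45DB0X1B_PAGE_POS is 9 and nvram_pagesize is
 * 264 (264-byte pages, each occupying a 512-byte window):
 *
 *   tg3_nvram_phys_addr():    1000 -> page   1000 / 264 = 3,
 *                                     offset 1000 % 264 = 208,
 *                                     phys   (3 << 9) + 208 = 1744
 *   tg3_nvram_logical_addr(): 1744 -> page   1744 >> 9 = 3,
 *                                     offset 1744 & 511 = 208,
 *                                     logical 3 * 264 + 208 = 1000
 */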
9313
Linus Torvalds1da177e2005-04-16 15:20:36 -07009314static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9315{
9316 int ret;
9317
Linus Torvalds1da177e2005-04-16 15:20:36 -07009318 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9319 return tg3_nvram_read_using_eeprom(tp, offset, val);
9320
Michael Chan18201802006-03-20 22:29:15 -08009321 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009322
9323 if (offset > NVRAM_ADDR_MSK)
9324 return -EINVAL;
9325
Michael Chanec41c7d2006-01-17 02:40:55 -08009326 ret = tg3_nvram_lock(tp);
9327 if (ret)
9328 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009329
Michael Chane6af3012005-04-21 17:12:05 -07009330 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009331
9332 tw32(NVRAM_ADDR, offset);
9333 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9334 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9335
9336 if (ret == 0)
9337 *val = swab32(tr32(NVRAM_RDDATA));
9338
Michael Chane6af3012005-04-21 17:12:05 -07009339 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009340
Michael Chan381291b2005-12-13 21:08:21 -08009341 tg3_nvram_unlock(tp);
9342
Linus Torvalds1da177e2005-04-16 15:20:36 -07009343 return ret;
9344}
9345
Michael Chan18201802006-03-20 22:29:15 -08009346static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9347{
9348 int err;
9349 u32 tmp;
9350
9351 err = tg3_nvram_read(tp, offset, &tmp);
9352 *val = swab32(tmp);
9353 return err;
9354}
9355
Linus Torvalds1da177e2005-04-16 15:20:36 -07009356static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9357 u32 offset, u32 len, u8 *buf)
9358{
9359 int i, j, rc = 0;
9360 u32 val;
9361
9362 for (i = 0; i < len; i += 4) {
9363 u32 addr, data;
9364
9365 addr = offset + i;
9366
9367 memcpy(&data, buf + i, 4);
9368
9369 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9370
9371 val = tr32(GRC_EEPROM_ADDR);
9372 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9373
9374 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9375 EEPROM_ADDR_READ);
9376 tw32(GRC_EEPROM_ADDR, val |
9377 (0 << EEPROM_ADDR_DEVID_SHIFT) |
9378 (addr & EEPROM_ADDR_ADDR_MASK) |
9379 EEPROM_ADDR_START |
9380 EEPROM_ADDR_WRITE);
9381
9382 for (j = 0; j < 10000; j++) {
9383 val = tr32(GRC_EEPROM_ADDR);
9384
9385 if (val & EEPROM_ADDR_COMPLETE)
9386 break;
9387 udelay(100);
9388 }
9389 if (!(val & EEPROM_ADDR_COMPLETE)) {
9390 rc = -EBUSY;
9391 break;
9392 }
9393 }
9394
9395 return rc;
9396}
9397
9398/* offset and length are dword aligned */
9399static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9400 u8 *buf)
9401{
9402 int ret = 0;
9403 u32 pagesize = tp->nvram_pagesize;
9404 u32 pagemask = pagesize - 1;
9405 u32 nvram_cmd;
9406 u8 *tmp;
9407
9408 tmp = kmalloc(pagesize, GFP_KERNEL);
9409 if (tmp == NULL)
9410 return -ENOMEM;
9411
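/* A rough summary of the loop below: unbuffered flash can only be
 * programmed a full page at a time, so for each page touched by the
 * request we read the whole page into the scratch buffer, merge in the
 * caller's bytes, issue a write-enable, erase the page, issue another
 * write-enable, and then program the page back one 32-bit word at a
 * time (NVRAM_CMD_FIRST on the first word, NVRAM_CMD_LAST on the last).
 */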
9412 while (len) {
9413 int j;
Michael Chane6af3012005-04-21 17:12:05 -07009414 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009415
9416 phy_addr = offset & ~pagemask;
9417
9418 for (j = 0; j < pagesize; j += 4) {
9419 if ((ret = tg3_nvram_read(tp, phy_addr + j,
9420 (u32 *) (tmp + j))))
9421 break;
9422 }
9423 if (ret)
9424 break;
9425
9426 page_off = offset & pagemask;
9427 size = pagesize;
9428 if (len < size)
9429 size = len;
9430
9431 len -= size;
9432
9433 memcpy(tmp + page_off, buf, size);
9434
9435 offset = offset + (pagesize - page_off);
9436
Michael Chane6af3012005-04-21 17:12:05 -07009437 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009438
9439 /*
9440 * Before we can erase the flash page, we need
9441 * to issue a special "write enable" command.
9442 */
9443 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9444
9445 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9446 break;
9447
9448 /* Erase the target page */
9449 tw32(NVRAM_ADDR, phy_addr);
9450
9451 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9452 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9453
9454 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9455 break;
9456
9457 /* Issue another write enable to start the write. */
9458 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9459
9460 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9461 break;
9462
9463 for (j = 0; j < pagesize; j += 4) {
9464 u32 data;
9465
9466 data = *((u32 *) (tmp + j));
9467 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9468
9469 tw32(NVRAM_ADDR, phy_addr + j);
9470
9471 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9472 NVRAM_CMD_WR;
9473
9474 if (j == 0)
9475 nvram_cmd |= NVRAM_CMD_FIRST;
9476 else if (j == (pagesize - 4))
9477 nvram_cmd |= NVRAM_CMD_LAST;
9478
9479 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9480 break;
9481 }
9482 if (ret)
9483 break;
9484 }
9485
9486 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9487 tg3_nvram_exec_cmd(tp, nvram_cmd);
9488
9489 kfree(tmp);
9490
9491 return ret;
9492}
9493
9494/* offset and length are dword aligned */
9495static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9496 u8 *buf)
9497{
9498 int i, ret = 0;
9499
9500 for (i = 0; i < len; i += 4, offset += 4) {
9501 u32 data, page_off, phy_addr, nvram_cmd;
9502
9503 memcpy(&data, buf + i, 4);
9504 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9505
9506 page_off = offset % tp->nvram_pagesize;
9507
Michael Chan18201802006-03-20 22:29:15 -08009508 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009509
9510 tw32(NVRAM_ADDR, phy_addr);
9511
9512 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9513
9514 if ((page_off == 0) || (i == 0))
9515 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -07009516 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009517 nvram_cmd |= NVRAM_CMD_LAST;
9518
9519 if (i == (len - 4))
9520 nvram_cmd |= NVRAM_CMD_LAST;
9521
Michael Chan4c987482005-09-05 17:52:38 -07009522 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -08009523 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -08009524 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Michael Chan4c987482005-09-05 17:52:38 -07009525 (tp->nvram_jedecnum == JEDEC_ST) &&
9526 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009527
9528 if ((ret = tg3_nvram_exec_cmd(tp,
9529 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9530 NVRAM_CMD_DONE)))
9531
9532 break;
9533 }
9534 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9535 /* We always do complete word writes to eeprom. */
9536 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9537 }
9538
9539 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9540 break;
9541 }
9542 return ret;
9543}
9544
9545/* offset and length are dword aligned */
9546static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9547{
9548 int ret;
9549
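/* On boards where GPIO1 gates the eeprom write-protect pin (see the
 * TG3_FLAG_EEPROM_WRITE_PROT handling in tg3_get_invariants()), drive
 * it low for the duration of the write and restore grc_local_ctrl once
 * the write has finished.
 */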
Linus Torvalds1da177e2005-04-16 15:20:36 -07009550 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07009551 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9552 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009553 udelay(40);
9554 }
9555
9556 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9557 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9558 }
9559 else {
9560 u32 grc_mode;
9561
Michael Chanec41c7d2006-01-17 02:40:55 -08009562 ret = tg3_nvram_lock(tp);
9563 if (ret)
9564 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009565
Michael Chane6af3012005-04-21 17:12:05 -07009566 tg3_enable_nvram_access(tp);
9567 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9568 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009569 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009570
9571 grc_mode = tr32(GRC_MODE);
9572 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9573
9574 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9575 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9576
9577 ret = tg3_nvram_write_block_buffered(tp, offset, len,
9578 buf);
9579 }
9580 else {
9581 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9582 buf);
9583 }
9584
9585 grc_mode = tr32(GRC_MODE);
9586 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9587
Michael Chane6af3012005-04-21 17:12:05 -07009588 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009589 tg3_nvram_unlock(tp);
9590 }
9591
9592 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -07009593 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009594 udelay(40);
9595 }
9596
9597 return ret;
9598}
9599
9600struct subsys_tbl_ent {
9601 u16 subsys_vendor, subsys_devid;
9602 u32 phy_id;
9603};
9604
9605static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9606 /* Broadcom boards. */
9607 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9608 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9609 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9610 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
9611 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9612 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9613 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
9614 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9615 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9616 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9617 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9618
9619 /* 3com boards. */
9620 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9621 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9622 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
9623 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9624 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9625
9626 /* DELL boards. */
9627 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9628 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9629 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9630 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9631
9632 /* Compaq boards. */
9633 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9634 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9635 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
9636 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9637 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9638
9639 /* IBM boards. */
9640 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9641};
9642
9643static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9644{
9645 int i;
9646
9647 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9648 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9649 tp->pdev->subsystem_vendor) &&
9650 (subsys_id_to_phy_id[i].subsys_devid ==
9651 tp->pdev->subsystem_device))
9652 return &subsys_id_to_phy_id[i];
9653 }
9654 return NULL;
9655}
9656
Michael Chan7d0c41e2005-04-21 17:06:20 -07009657static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009658{
Linus Torvalds1da177e2005-04-16 15:20:36 -07009659 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -08009660 u16 pmcsr;
9661
9662 /* On some early chips the SRAM cannot be accessed in D3hot state,
9663 * so we need to make sure we're in D0.
9664 */
9665 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9666 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9667 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9668 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -07009669
9670 /* Make sure register accesses (indirect or otherwise)
9671 * will function correctly.
9672 */
9673 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9674 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009675
David S. Millerf49639e2006-06-09 11:58:36 -07009676 /* The memory arbiter has to be enabled in order for SRAM accesses
9677 * to succeed. Normally on powerup the tg3 chip firmware will make
9678 * sure it is enabled, but other entities such as system netboot
9679 * code might disable it.
9680 */
9681 val = tr32(MEMARB_MODE);
9682 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9683
Linus Torvalds1da177e2005-04-16 15:20:36 -07009684 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -07009685 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9686
David S. Millerf49639e2006-06-09 11:58:36 -07009687 /* Assume an onboard device by default. */
9688 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
David S. Miller72b845e2006-03-14 14:11:48 -08009689
Linus Torvalds1da177e2005-04-16 15:20:36 -07009690 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9691 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9692 u32 nic_cfg, led_cfg;
Michael Chan7d0c41e2005-04-21 17:06:20 -07009693 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9694 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009695
9696 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9697 tp->nic_sram_data_cfg = nic_cfg;
9698
9699 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9700 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9701 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9702 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9703 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9704 (ver > 0) && (ver < 0x100))
9705 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9706
Linus Torvalds1da177e2005-04-16 15:20:36 -07009707 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9708 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9709 eeprom_phy_serdes = 1;
9710
9711 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9712 if (nic_phy_id != 0) {
9713 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9714 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9715
9716 eeprom_phy_id = (id1 >> 16) << 10;
9717 eeprom_phy_id |= (id2 & 0xfc00) << 16;
9718 eeprom_phy_id |= (id2 & 0x03ff) << 0;
9719 } else
9720 eeprom_phy_id = 0;
9721
Michael Chan7d0c41e2005-04-21 17:06:20 -07009722 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -07009723 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -07009724 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -07009725 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9726 else
9727 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9728 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07009729
John W. Linvillecbf46852005-04-21 17:01:29 -07009730 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009731 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9732 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -07009733 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07009734 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9735
9736 switch (led_cfg) {
9737 default:
9738 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9739 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9740 break;
9741
9742 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9743 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9744 break;
9745
9746 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9747 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -07009748
9749 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9750 * read with some older 5700/5701 bootcode.
9751 */
9752 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9753 ASIC_REV_5700 ||
9754 GET_ASIC_REV(tp->pci_chip_rev_id) ==
9755 ASIC_REV_5701)
9756 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9757
Linus Torvalds1da177e2005-04-16 15:20:36 -07009758 break;
9759
9760 case SHASTA_EXT_LED_SHARED:
9761 tp->led_ctrl = LED_CTRL_MODE_SHARED;
9762 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9763 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9764 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9765 LED_CTRL_MODE_PHY_2);
9766 break;
9767
9768 case SHASTA_EXT_LED_MAC:
9769 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9770 break;
9771
9772 case SHASTA_EXT_LED_COMBO:
9773 tp->led_ctrl = LED_CTRL_MODE_COMBO;
9774 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9775 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9776 LED_CTRL_MODE_PHY_2);
9777 break;
9778
9779 };
9780
9781 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9783 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9784 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9785
Michael Chanbbadf502006-04-06 21:46:34 -07009786 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009787 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
David S. Millerf49639e2006-06-09 11:58:36 -07009788 else
9789 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009790
9791 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9792 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07009793 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009794 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9795 }
9796 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9797 tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9798
9799 if (cfg2 & (1 << 17))
9800 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9801
9802 /* serdes signal pre-emphasis in register 0x590 set by */
9803 /* bootcode if bit 18 is set */
9804 if (cfg2 & (1 << 18))
9805 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9806 }
Michael Chan7d0c41e2005-04-21 17:06:20 -07009807}
9808
9809static int __devinit tg3_phy_probe(struct tg3 *tp)
9810{
9811 u32 hw_phy_id_1, hw_phy_id_2;
9812 u32 hw_phy_id, hw_phy_id_masked;
9813 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009814
9815 /* Reading the PHY ID register can conflict with ASF
9816 * firmware access to the PHY hardware.
9817 */
9818 err = 0;
9819 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9820 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9821 } else {
9822 /* Now read the physical PHY_ID from the chip and verify
9823 * that it is sane. If it doesn't look good, we fall back
9824 * to either the hard-coded table-based PHY_ID or, failing
9825 * that, the value found in the eeprom area.
9826 */
9827 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9828 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9829
9830 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
9831 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9832 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
9833
9834 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9835 }
9836
9837 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9838 tp->phy_id = hw_phy_id;
9839 if (hw_phy_id_masked == PHY_ID_BCM8002)
9840 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -07009841 else
9842 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009843 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -07009844 if (tp->phy_id != PHY_ID_INVALID) {
9845 /* Do nothing, phy ID already set up in
9846 * tg3_get_eeprom_hw_cfg().
9847 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009848 } else {
9849 struct subsys_tbl_ent *p;
9850
9851 /* No eeprom signature? Try the hardcoded
9852 * subsys device table.
9853 */
9854 p = lookup_by_subsys(tp);
9855 if (!p)
9856 return -ENODEV;
9857
9858 tp->phy_id = p->phy_id;
9859 if (!tp->phy_id ||
9860 tp->phy_id == PHY_ID_BCM8002)
9861 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9862 }
9863 }
9864
Michael Chan747e8f82005-07-25 12:33:22 -07009865 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07009866 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9867 u32 bmsr, adv_reg, tg3_ctrl;
9868
9869 tg3_readphy(tp, MII_BMSR, &bmsr);
9870 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9871 (bmsr & BMSR_LSTATUS))
9872 goto skip_phy_reset;
9873
9874 err = tg3_phy_reset(tp);
9875 if (err)
9876 return err;
9877
9878 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9879 ADVERTISE_100HALF | ADVERTISE_100FULL |
9880 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9881 tg3_ctrl = 0;
9882 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9883 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9884 MII_TG3_CTRL_ADV_1000_FULL);
9885 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9886 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9887 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9888 MII_TG3_CTRL_ENABLE_AS_MASTER);
9889 }
9890
9891 if (!tg3_copper_is_advertising_all(tp)) {
9892 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9893
9894 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9895 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9896
9897 tg3_writephy(tp, MII_BMCR,
9898 BMCR_ANENABLE | BMCR_ANRESTART);
9899 }
9900 tg3_phy_set_wirespeed(tp);
9901
9902 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9903 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9904 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9905 }
9906
9907skip_phy_reset:
9908 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9909 err = tg3_init_5401phy_dsp(tp);
9910 if (err)
9911 return err;
9912 }
9913
9914 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9915 err = tg3_init_5401phy_dsp(tp);
9916 }
9917
Michael Chan747e8f82005-07-25 12:33:22 -07009918 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009919 tp->link_config.advertising =
9920 (ADVERTISED_1000baseT_Half |
9921 ADVERTISED_1000baseT_Full |
9922 ADVERTISED_Autoneg |
9923 ADVERTISED_FIBRE);
9924 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9925 tp->link_config.advertising &=
9926 ~(ADVERTISED_1000baseT_Half |
9927 ADVERTISED_1000baseT_Full);
9928
9929 return err;
9930}
9931
9932static void __devinit tg3_read_partno(struct tg3 *tp)
9933{
9934 unsigned char vpd_data[256];
9935 int i;
Michael Chan1b277772006-03-20 22:27:48 -08009936 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009937
Michael Chan18201802006-03-20 22:29:15 -08009938 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -07009939 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009940
Michael Chan18201802006-03-20 22:29:15 -08009941 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -08009942 for (i = 0; i < 256; i += 4) {
9943 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009944
Michael Chan1b277772006-03-20 22:27:48 -08009945 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9946 goto out_not_found;
9947
9948 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
9949 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
9950 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9951 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9952 }
9953 } else {
9954 int vpd_cap;
9955
9956 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9957 for (i = 0; i < 256; i += 4) {
9958 u32 tmp, j = 0;
9959 u16 tmp16;
9960
9961 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9962 i);
9963 while (j++ < 100) {
9964 pci_read_config_word(tp->pdev, vpd_cap +
9965 PCI_VPD_ADDR, &tmp16);
9966 if (tmp16 & 0x8000)
9967 break;
9968 msleep(1);
9969 }
David S. Millerf49639e2006-06-09 11:58:36 -07009970 if (!(tmp16 & 0x8000))
9971 goto out_not_found;
9972
Michael Chan1b277772006-03-20 22:27:48 -08009973 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9974 &tmp);
9975 tmp = cpu_to_le32(tmp);
9976 memcpy(&vpd_data[i], &tmp, 4);
9977 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009978 }
9979
9980 /* Now parse and find the part number. */
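/* vpd_data[] now holds a copy of the PCI VPD image. The loop below
 * walks the standard VPD resource tags: 0x82 (identifier string) and
 * 0x91 (read-write section) are skipped, and the part number is taken
 * from the "PN" keyword inside the 0x90 read-only section.
 */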
9981 for (i = 0; i < 256; ) {
9982 unsigned char val = vpd_data[i];
9983 int block_end;
9984
9985 if (val == 0x82 || val == 0x91) {
9986 i = (i + 3 +
9987 (vpd_data[i + 1] +
9988 (vpd_data[i + 2] << 8)));
9989 continue;
9990 }
9991
9992 if (val != 0x90)
9993 goto out_not_found;
9994
9995 block_end = (i + 3 +
9996 (vpd_data[i + 1] +
9997 (vpd_data[i + 2] << 8)));
9998 i += 3;
9999 while (i < block_end) {
10000 if (vpd_data[i + 0] == 'P' &&
10001 vpd_data[i + 1] == 'N') {
10002 int partno_len = vpd_data[i + 2];
10003
10004 if (partno_len > 24)
10005 goto out_not_found;
10006
10007 memcpy(tp->board_part_number,
10008 &vpd_data[i + 3],
10009 partno_len);
10010
10011 /* Success. */
10012 return;
10013 }
10014 }
10015
10016 /* Part number not found. */
10017 goto out_not_found;
10018 }
10019
10020out_not_found:
10021 strcpy(tp->board_part_number, "none");
10022}
10023
Michael Chanc4e65752006-03-20 22:29:32 -080010024static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10025{
10026 u32 val, offset, start;
10027
10028 if (tg3_nvram_read_swab(tp, 0, &val))
10029 return;
10030
10031 if (val != TG3_EEPROM_MAGIC)
10032 return;
10033
10034 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10035 tg3_nvram_read_swab(tp, 0x4, &start))
10036 return;
10037
10038 offset = tg3_nvram_logical_addr(tp, offset);
10039 if (tg3_nvram_read_swab(tp, offset, &val))
10040 return;
10041
10042 if ((val & 0xfc000000) == 0x0c000000) {
10043 u32 ver_offset, addr;
10044 int i;
10045
10046 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10047 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10048 return;
10049
10050 if (val != 0)
10051 return;
10052
10053 addr = offset + ver_offset - start;
10054 for (i = 0; i < 16; i += 4) {
10055 if (tg3_nvram_read(tp, addr + i, &val))
10056 return;
10057
10058 val = cpu_to_le32(val);
10059 memcpy(tp->fw_ver + i, &val, 4);
10060 }
10061 }
10062}
10063
Linus Torvalds1da177e2005-04-16 15:20:36 -070010064static int __devinit tg3_get_invariants(struct tg3 *tp)
10065{
10066 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010067 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10068 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
Michael Chan399de502005-10-03 14:02:39 -070010069 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10070 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010071 { },
10072 };
10073 u32 misc_ctrl_reg;
10074 u32 cacheline_sz_reg;
10075 u32 pci_state_reg, grc_misc_cfg;
10076 u32 val;
10077 u16 pci_cmd;
10078 int err;
10079
Linus Torvalds1da177e2005-04-16 15:20:36 -070010080 /* Force memory write invalidate off. If we leave it on,
10081 * then on 5700_BX chips we have to enable a workaround.
10082 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10083 * to match the cacheline size. The Broadcom driver has this
10084 * workaround but turns MWI off at all times and so never uses
10085 * it. This seems to suggest that the workaround is insufficient.
10086 */
10087 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10088 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10089 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10090
10091 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10092 * has the register indirect write enable bit set before
10093 * we try to access any of the MMIO registers. It is also
10094 * critical that the PCI-X hw workaround situation is decided
10095 * before that as well.
10096 */
10097 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10098 &misc_ctrl_reg);
10099
10100 tp->pci_chip_rev_id = (misc_ctrl_reg >>
10101 MISC_HOST_CTRL_CHIPREV_SHIFT);
10102
Michael Chanff645be2005-04-21 17:09:53 -070010103 /* Wrong chip ID in 5752 A0. This code can be removed later
10104 * as A0 is not in production.
10105 */
10106 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10107 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10108
Michael Chan68929142005-08-09 20:17:14 -070010109 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10110 * we need to disable memory and use config. cycles
10111 * only to access all registers. The 5702/03 chips
10112 * can mistakenly decode the special cycles from the
10113 * ICH chipsets as memory write cycles, causing corruption
10114 * of register and memory space. Only certain ICH bridges
10115 * will drive special cycles with non-zero data during the
10116 * address phase which can fall within the 5703's address
10117 * range. This is not an ICH bug as the PCI spec allows
10118 * non-zero address during special cycles. However, only
10119 * these ICH bridges are known to drive non-zero addresses
10120 * during special cycles.
10121 *
10122 * Since special cycles do not cross PCI bridges, we only
10123 * enable this workaround if the 5703 is on the secondary
10124 * bus of these ICH bridges.
10125 */
10126 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10127 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10128 static struct tg3_dev_id {
10129 u32 vendor;
10130 u32 device;
10131 u32 rev;
10132 } ich_chipsets[] = {
10133 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10134 PCI_ANY_ID },
10135 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10136 PCI_ANY_ID },
10137 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10138 0xa },
10139 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10140 PCI_ANY_ID },
10141 { },
10142 };
10143 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10144 struct pci_dev *bridge = NULL;
10145
10146 while (pci_id->vendor != 0) {
10147 bridge = pci_get_device(pci_id->vendor, pci_id->device,
10148 bridge);
10149 if (!bridge) {
10150 pci_id++;
10151 continue;
10152 }
10153 if (pci_id->rev != PCI_ANY_ID) {
10154 u8 rev;
10155
10156 pci_read_config_byte(bridge, PCI_REVISION_ID,
10157 &rev);
10158 if (rev > pci_id->rev)
10159 continue;
10160 }
10161 if (bridge->subordinate &&
10162 (bridge->subordinate->number ==
10163 tp->pdev->bus->number)) {
10164
10165 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10166 pci_dev_put(bridge);
10167 break;
10168 }
10169 }
10170 }
10171
Michael Chan4a29cc22006-03-19 13:21:12 -080010172 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10173 * DMA addresses > 40-bit. This bridge may have other additional
10174 * 57xx devices behind it in some 4-port NIC designs for example.
10175 * Any tg3 device found behind the bridge will also need the 40-bit
10176 * DMA workaround.
10177 */
Michael Chana4e2b342005-10-26 15:46:52 -070010178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10180 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080010181 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070010182 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070010183 }
Michael Chan4a29cc22006-03-19 13:21:12 -080010184 else {
10185 struct pci_dev *bridge = NULL;
10186
10187 do {
10188 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10189 PCI_DEVICE_ID_SERVERWORKS_EPB,
10190 bridge);
10191 if (bridge && bridge->subordinate &&
10192 (bridge->subordinate->number <=
10193 tp->pdev->bus->number) &&
10194 (bridge->subordinate->subordinate >=
10195 tp->pdev->bus->number)) {
10196 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10197 pci_dev_put(bridge);
10198 break;
10199 }
10200 } while (bridge);
10201 }
Michael Chan4cf78e42005-07-25 12:29:19 -070010202
Linus Torvalds1da177e2005-04-16 15:20:36 -070010203 /* Initialize misc host control in PCI block. */
10204 tp->misc_host_ctrl |= (misc_ctrl_reg &
10205 MISC_HOST_CTRL_CHIPREV);
10206 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10207 tp->misc_host_ctrl);
10208
10209 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10210 &cacheline_sz_reg);
10211
10212 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
10213 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
10214 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
10215 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
10216
John W. Linville2052da92005-04-21 16:56:08 -070010217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070010218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080010219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080010220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Michael Chana4e2b342005-10-26 15:46:52 -070010221 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070010222 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10223
John W. Linville1b440c562005-04-21 17:03:18 -070010224 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10225 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10226 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10227
Michael Chan5a6f3072006-03-20 22:28:05 -080010228 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080010229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10230 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
Michael Chan5a6f3072006-03-20 22:28:05 -080010231 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080010232 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070010233 } else {
10234 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10235 TG3_FLG2_HW_TSO_1_BUG;
10236 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10237 ASIC_REV_5750 &&
10238 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10239 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10240 }
Michael Chan5a6f3072006-03-20 22:28:05 -080010241 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010242
Michael Chan0f893dc2005-07-25 12:30:38 -070010243 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10244 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
Michael Chand9ab5ad2006-03-20 22:27:35 -080010245 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080010246 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
Michael Chand9ab5ad2006-03-20 22:27:35 -080010247 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
Michael Chan0f893dc2005-07-25 12:30:38 -070010248 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10249
Linus Torvalds1da177e2005-04-16 15:20:36 -070010250 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10251 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10252
Michael Chan399de502005-10-03 14:02:39 -070010253 /* If we have an AMD 762 or VIA K8T800 chipset, write
10254 * reordering to the mailbox registers done by the host
10255 * controller can cause major troubles. We read back from
10256 * every mailbox register write to force the writes to be
10257 * posted to the chip in order.
10258 */
10259 if (pci_dev_present(write_reorder_chipsets) &&
10260 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10261 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10262
Linus Torvalds1da177e2005-04-16 15:20:36 -070010263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10264 tp->pci_lat_timer < 64) {
10265 tp->pci_lat_timer = 64;
10266
10267 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
10268 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
10269 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
10270 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
10271
10272 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10273 cacheline_sz_reg);
10274 }
10275
10276 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10277 &pci_state_reg);
10278
10279 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10280 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10281
10282 /* If this is a 5700 BX chipset, and we are in PCI-X
10283 * mode, enable register write workaround.
10284 *
10285 * The workaround is to use indirect register accesses
10286 * for all chip writes not to mailbox registers.
10287 */
10288 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10289 u32 pm_reg;
10290 u16 pci_cmd;
10291
10292 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10293
10294 /* The chip can have its power management PCI config
10295 * space registers clobbered due to this bug.
10296 * So explicitly force the chip into D0 here.
10297 */
10298 pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10299 &pm_reg);
10300 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10301 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10302 pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10303 pm_reg);
10304
10305 /* Also, force SERR#/PERR# in PCI command. */
10306 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10307 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10308 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10309 }
10310 }
10311
Michael Chan087fe252005-08-09 20:17:41 -070010312 /* 5700 BX chips need to have their TX producer index mailboxes
10313 * written twice to work around a bug.
10314 */
10315 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10316 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10317
Linus Torvalds1da177e2005-04-16 15:20:36 -070010318 /* Back to back register writes can cause problems on this chip,
10319 * the workaround is to read back all reg writes except those to
10320 * mailbox regs. See tg3_write_indirect_reg32().
10321 *
10322 * PCI Express 5750_A0 rev chips need this workaround too.
10323 */
10324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10325 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10326 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10327 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10328
10329 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10330 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10331 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10332 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10333
10334 /* Chip-specific fixup from Broadcom driver */
10335 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10336 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10337 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10338 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10339 }
10340
Michael Chan1ee582d2005-08-09 20:16:46 -070010341 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070010342 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070010343 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070010344 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070010345 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070010346 tp->write32_tx_mbox = tg3_write32;
10347 tp->write32_rx_mbox = tg3_write32;
10348
10349 /* Various workaround register access methods */
10350 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10351 tp->write32 = tg3_write_indirect_reg32;
10352 else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10353 tp->write32 = tg3_write_flush_reg32;
10354
10355 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10356 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10357 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10358 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10359 tp->write32_rx_mbox = tg3_write_flush_reg32;
10360 }
Michael Chan20094932005-08-09 20:16:32 -070010361
Michael Chan68929142005-08-09 20:17:14 -070010362 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10363 tp->read32 = tg3_read_indirect_reg32;
10364 tp->write32 = tg3_write_indirect_reg32;
10365 tp->read32_mbox = tg3_read_indirect_mbox;
10366 tp->write32_mbox = tg3_write_indirect_mbox;
10367 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10368 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10369
10370 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070010371 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070010372
10373 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10374 pci_cmd &= ~PCI_COMMAND_MEMORY;
10375 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10376 }
10377
Michael Chanbbadf502006-04-06 21:46:34 -070010378 if (tp->write32 == tg3_write_indirect_reg32 ||
10379 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10380 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070010381 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070010382 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10383
Michael Chan7d0c41e2005-04-21 17:06:20 -070010384 /* Get eeprom hw config before calling tg3_set_power_state().
10385 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10386 * determined before calling tg3_set_power_state() so that
10387 * we know whether or not to switch out of Vaux power.
10388 * When the flag is set, it means that GPIO1 is used for eeprom
10389 * write protect and also implies that it is a LOM where GPIOs
10390 * are not used to switch power.
10391 */
10392 tg3_get_eeprom_hw_cfg(tp);
10393
Michael Chan314fba32005-04-21 17:07:04 -070010394 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10395 * GPIO1 driven high will bring 5700's external PHY out of reset.
10396 * It is also used as eeprom write protect on LOMs.
10397 */
10398 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10399 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10400 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10401 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10402 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070010403 /* Unused GPIO3 must be driven as output on 5752 because there
10404 * are no pull-up resistors on unused GPIO pins.
10405 */
10406 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10407 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070010408
Michael Chanaf36e6b2006-03-23 01:28:06 -080010409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10410 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10411
Linus Torvalds1da177e2005-04-16 15:20:36 -070010412 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080010413 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010414 if (err) {
10415 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10416 pci_name(tp->pdev));
10417 return err;
10418 }
10419
10420 /* 5700 B0 chips do not support checksumming correctly due
10421 * to hardware bugs.
10422 */
10423 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10424 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10425
Linus Torvalds1da177e2005-04-16 15:20:36 -070010426 /* Derive initial jumbo mode from MTU assigned in
10427 * ether_setup() via the alloc_etherdev() call
10428 */
Michael Chan0f893dc2005-07-25 12:30:38 -070010429 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070010430 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070010431 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010432
10433 /* Determine WakeOnLan speed to use. */
10434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10435 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10436 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10437 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10438 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10439 } else {
10440 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10441 }
10442
10443 /* A few boards don't want Ethernet@WireSpeed phy feature */
10444 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10445 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10446 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070010447 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10448 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010449 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10450
10451 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10452 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10453 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10454 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10455 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10456
Michael Chanc424cb22006-04-29 18:56:34 -070010457 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10459 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10460 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10461 else
10462 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10463 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010464
Linus Torvalds1da177e2005-04-16 15:20:36 -070010465 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010466 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10467 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10468 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10469
10470 /* Initialize MAC MI mode, polling disabled. */
10471 tw32_f(MAC_MI_MODE, tp->mi_mode);
10472 udelay(80);
10473
10474 /* Initialize data/descriptor byte/word swapping. */
10475 val = tr32(GRC_MODE);
10476 val &= GRC_MODE_HOST_STACKUP;
10477 tw32(GRC_MODE, val | tp->grc_mode);
10478
10479 tg3_switch_clocks(tp);
10480
10481 /* Clear this out for sanity. */
10482 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10483
10484 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10485 &pci_state_reg);
10486 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10487 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10488 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10489
10490 if (chiprevid == CHIPREV_ID_5701_A0 ||
10491 chiprevid == CHIPREV_ID_5701_B0 ||
10492 chiprevid == CHIPREV_ID_5701_B2 ||
10493 chiprevid == CHIPREV_ID_5701_B5) {
10494 void __iomem *sram_base;
10495
10496 /* Write some dummy words into the SRAM status block
10497 * area, see if it reads back correctly. If the return
10498 * value is bad, force enable the PCIX workaround.
10499 */
10500 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10501
10502 writel(0x00000000, sram_base);
10503 writel(0x00000000, sram_base + 4);
10504 writel(0xffffffff, sram_base + 4);
10505 if (readl(sram_base) != 0x00000000)
10506 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10507 }
10508 }
10509
10510 udelay(50);
10511 tg3_nvram_init(tp);
10512
10513 grc_misc_cfg = tr32(GRC_MISC_CFG);
10514 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10515
10516 /* Broadcom's driver says that CIOBE multisplit has a bug */
10517#if 0
10518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10519 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10520 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10521 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10522 }
10523#endif
10524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10525 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10526 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10527 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10528
David S. Millerfac9b832005-05-18 22:46:34 -070010529 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10530 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10531 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10532 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10533 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10534 HOSTCC_MODE_CLRTICK_TXBD);
10535
10536 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10537 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10538 tp->misc_host_ctrl);
10539 }
10540
Linus Torvalds1da177e2005-04-16 15:20:36 -070010541 /* these are limited to 10/100 only */
10542 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10543 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10544 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10545 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10546 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10547 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10548 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10549 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10550 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10551 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10552 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10553
10554 err = tg3_phy_probe(tp);
10555 if (err) {
10556 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10557 pci_name(tp->pdev), err);
10558 /* ... but do not return immediately ... */
10559 }
10560
10561 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080010562 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010563
10564 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10565 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10566 } else {
10567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10568 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10569 else
10570 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10571 }
10572
10573 /* 5700 {AX,BX} chips have a broken status block link
10574 * change bit implementation, so we must use the
10575 * status register in those cases.
10576 */
10577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10578 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10579 else
10580 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10581
10582 /* The led_ctrl is set during tg3_phy_probe; here we might
10583 * have to force the link status polling mechanism based
10584 * upon subsystem IDs.
10585 */
10586 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10587 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10588 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10589 TG3_FLAG_USE_LINKCHG_REG);
10590 }
10591
10592 /* For all SERDES we poll the MAC status register. */
10593 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10594 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10595 else
10596 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10597
Michael Chan5a6f3072006-03-20 22:28:05 -080010598 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070010599 * straddle the 4GB address boundary in some cases.
10600 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080010601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
Michael Chan5a6f3072006-03-20 22:28:05 -080010603 tp->dev->hard_start_xmit = tg3_start_xmit;
10604 else
10605 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010606
10607 tp->rx_offset = 2;
10608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10609 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10610 tp->rx_offset = 0;
10611
Michael Chanf92905d2006-06-29 20:14:29 -070010612 tp->rx_std_max_post = TG3_RX_RING_SIZE;
10613
10614 /* Increment the rx prod index on the rx std ring by at most
10615 * 8 for these chips to work around hw errata.
10616 */
10617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10620 tp->rx_std_max_post = 8;
10621
Linus Torvalds1da177e2005-04-16 15:20:36 -070010622 /* By default, disable wake-on-lan. User can change this
10623 * using ETHTOOL_SWOL.
10624 */
10625 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10626
10627 return err;
10628}
10629
10630#ifdef CONFIG_SPARC64
10631static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10632{
10633 struct net_device *dev = tp->dev;
10634 struct pci_dev *pdev = tp->pdev;
10635 struct pcidev_cookie *pcp = pdev->sysdata;
10636
10637 if (pcp != NULL) {
David S. Millerde8d28b2006-06-22 16:18:54 -070010638 unsigned char *addr;
10639 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010640
David S. Millerde8d28b2006-06-22 16:18:54 -070010641 addr = of_get_property(pcp->prom_node, "local-mac-address",
10642 &len);
10643 if (addr && len == 6) {
10644 memcpy(dev->dev_addr, addr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070010645 memcpy(dev->perm_addr, dev->dev_addr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010646 return 0;
10647 }
10648 }
10649 return -ENODEV;
10650}
10651
10652static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10653{
10654 struct net_device *dev = tp->dev;
10655
10656 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070010657 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010658 return 0;
10659}
10660#endif
10661
10662static int __devinit tg3_get_device_address(struct tg3 *tp)
10663{
10664 struct net_device *dev = tp->dev;
10665 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080010666 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010667
10668#ifdef CONFIG_SPARC64
10669 if (!tg3_get_macaddr_sparc(tp))
10670 return 0;
10671#endif
10672
10673 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070010674 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010675 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010676 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10677 mac_offset = 0xcc;
10678 if (tg3_nvram_lock(tp))
10679 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10680 else
10681 tg3_nvram_unlock(tp);
10682 }
10683
10684 /* First try to get it from MAC address mailbox. */
10685 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10686 if ((hi >> 16) == 0x484b) {
10687 dev->dev_addr[0] = (hi >> 8) & 0xff;
10688 dev->dev_addr[1] = (hi >> 0) & 0xff;
10689
10690 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10691 dev->dev_addr[2] = (lo >> 24) & 0xff;
10692 dev->dev_addr[3] = (lo >> 16) & 0xff;
10693 dev->dev_addr[4] = (lo >> 8) & 0xff;
10694 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010695
Michael Chan008652b2006-03-27 23:14:53 -080010696 /* Some old bootcode may report a 0 MAC address in SRAM */
10697 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10698 }
10699 if (!addr_ok) {
10700 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070010701 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080010702 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10703 dev->dev_addr[0] = ((hi >> 16) & 0xff);
10704 dev->dev_addr[1] = ((hi >> 24) & 0xff);
10705 dev->dev_addr[2] = ((lo >> 0) & 0xff);
10706 dev->dev_addr[3] = ((lo >> 8) & 0xff);
10707 dev->dev_addr[4] = ((lo >> 16) & 0xff);
10708 dev->dev_addr[5] = ((lo >> 24) & 0xff);
10709 }
10710 /* Finally just fetch it out of the MAC control regs. */
10711 else {
10712 hi = tr32(MAC_ADDR_0_HIGH);
10713 lo = tr32(MAC_ADDR_0_LOW);
10714
10715 dev->dev_addr[5] = lo & 0xff;
10716 dev->dev_addr[4] = (lo >> 8) & 0xff;
10717 dev->dev_addr[3] = (lo >> 16) & 0xff;
10718 dev->dev_addr[2] = (lo >> 24) & 0xff;
10719 dev->dev_addr[1] = hi & 0xff;
10720 dev->dev_addr[0] = (hi >> 8) & 0xff;
10721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010722 }
10723
10724 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10725#ifdef CONFIG_SPARC64
10726 if (!tg3_get_default_macaddr_sparc(tp))
10727 return 0;
10728#endif
10729 return -EINVAL;
10730 }
John W. Linville2ff43692005-09-12 14:44:20 -070010731 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010732 return 0;
10733}
10734
David S. Miller59e6b432005-05-18 22:50:10 -070010735#define BOUNDARY_SINGLE_CACHELINE 1
10736#define BOUNDARY_MULTI_CACHELINE 2
10737
10738static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10739{
10740 int cacheline_size;
10741 u8 byte;
10742 int goal;
10743
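	/* The PCI cache line size register counts 32-bit words, hence
	 * the multiply by 4 below; a value of 0 (not programmed) is
	 * treated as if the cache line were 1024 bytes.
	 */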
10744 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10745 if (byte == 0)
10746 cacheline_size = 1024;
10747 else
10748 cacheline_size = (int) byte * 4;
10749
10750 /* On 5703 and later chips, the boundary bits have no
10751 * effect.
10752 */
10753 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10754 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10755 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10756 goto out;
10757
10758#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10759 goal = BOUNDARY_MULTI_CACHELINE;
10760#else
10761#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10762 goal = BOUNDARY_SINGLE_CACHELINE;
10763#else
10764 goal = 0;
10765#endif
10766#endif
10767
10768 if (!goal)
10769 goto out;
10770
10771 /* PCI controllers on most RISC systems tend to disconnect
10772 * when a device tries to burst across a cache-line boundary.
10773 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10774 *
10775 * Unfortunately, for PCI-E there are only limited
10776 * write-side controls for this, and thus for reads
10777 * we will still get the disconnects. We'll also waste
10778 * these PCI cycles for both read and write for chips
10779 * other than 5700 and 5701 which do not implement the
10780 * boundary bits.
10781 */
10782 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10783 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10784 switch (cacheline_size) {
10785 case 16:
10786 case 32:
10787 case 64:
10788 case 128:
10789 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10790 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10791 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10792 } else {
10793 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10794 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10795 }
10796 break;
10797
10798 case 256:
10799 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10800 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10801 break;
10802
10803 default:
10804 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10805 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10806 break;
10807		}
10808 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10809 switch (cacheline_size) {
10810 case 16:
10811 case 32:
10812 case 64:
10813 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10814 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10815 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10816 break;
10817 }
10818 /* fallthrough */
10819 case 128:
10820 default:
10821 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10822 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10823 break;
10824		}
10825 } else {
10826 switch (cacheline_size) {
10827 case 16:
10828 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10829 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10830 DMA_RWCTRL_WRITE_BNDRY_16);
10831 break;
10832 }
10833 /* fallthrough */
10834 case 32:
10835 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10836 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10837 DMA_RWCTRL_WRITE_BNDRY_32);
10838 break;
10839 }
10840 /* fallthrough */
10841 case 64:
10842 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10843 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10844 DMA_RWCTRL_WRITE_BNDRY_64);
10845 break;
10846 }
10847 /* fallthrough */
10848 case 128:
10849 if (goal == BOUNDARY_SINGLE_CACHELINE) {
10850 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10851 DMA_RWCTRL_WRITE_BNDRY_128);
10852 break;
10853 }
10854 /* fallthrough */
10855 case 256:
10856 val |= (DMA_RWCTRL_READ_BNDRY_256 |
10857 DMA_RWCTRL_WRITE_BNDRY_256);
10858 break;
10859 case 512:
10860 val |= (DMA_RWCTRL_READ_BNDRY_512 |
10861 DMA_RWCTRL_WRITE_BNDRY_512);
10862 break;
10863 case 1024:
10864 default:
10865 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10866 DMA_RWCTRL_WRITE_BNDRY_1024);
10867 break;
10868		}
10869 }
10870
10871out:
10872 return val;
10873}
10874
Linus Torvalds1da177e2005-04-16 15:20:36 -070010875static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10876{
10877 struct tg3_internal_buffer_desc test_desc;
10878 u32 sram_dma_descs;
10879 int i, ret;
10880
10881 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10882
10883 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10884 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10885 tw32(RDMAC_STATUS, 0);
10886 tw32(WDMAC_STATUS, 0);
10887
10888 tw32(BUFMGR_MODE, 0);
10889 tw32(FTQ_RESET, 0);
10890
10891 test_desc.addr_hi = ((u64) buf_dma) >> 32;
10892 test_desc.addr_lo = buf_dma & 0xffffffff;
10893 test_desc.nic_mbuf = 0x00002100;
10894 test_desc.len = size;
10895
10896 /*
10897	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10898 * the *second* time the tg3 driver was getting loaded after an
10899 * initial scan.
10900 *
10901 * Broadcom tells me:
10902 * ...the DMA engine is connected to the GRC block and a DMA
10903 * reset may affect the GRC block in some unpredictable way...
10904 * The behavior of resets to individual blocks has not been tested.
10905 *
10906 * Broadcom noted the GRC reset will also reset all sub-components.
10907 */
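	/* "to_device" exercises the read DMA engine (the chip reads the
	 * buffer from host memory); otherwise the write DMA engine is
	 * used to write the buffer back to host memory.
	 */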
10908 if (to_device) {
10909 test_desc.cqid_sqid = (13 << 8) | 2;
10910
10911 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10912 udelay(40);
10913 } else {
10914 test_desc.cqid_sqid = (16 << 8) | 7;
10915
10916 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10917 udelay(40);
10918 }
10919 test_desc.flags = 0x00000005;
10920
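	/* Copy the test descriptor into NIC SRAM one 32-bit word at a
	 * time through the PCI memory window registers.
	 */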
10921 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10922 u32 val;
10923
10924 val = *(((u32 *)&test_desc) + i);
10925 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10926 sram_dma_descs + (i * sizeof(u32)));
10927 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10928 }
10929 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10930
10931 if (to_device) {
10932 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10933 } else {
10934 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10935 }
10936
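	/* Poll the completion FIFO for up to ~4ms (40 x 100us) until the
	 * descriptor address shows up, indicating the transfer is done.
	 */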
10937 ret = -ENODEV;
10938 for (i = 0; i < 40; i++) {
10939 u32 val;
10940
10941 if (to_device)
10942 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10943 else
10944 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10945 if ((val & 0xffff) == sram_dma_descs) {
10946 ret = 0;
10947 break;
10948 }
10949
10950 udelay(100);
10951 }
10952
10953 return ret;
10954}
10955
David S. Millerded73402005-05-23 13:59:47 -070010956#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070010957
10958static int __devinit tg3_test_dma(struct tg3 *tp)
10959{
10960 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070010961 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010962 int ret;
10963
10964 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10965 if (!buf) {
10966 ret = -ENOMEM;
10967 goto out_nofree;
10968 }
10969
10970 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10971 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10972
David S. Miller59e6b432005-05-18 22:50:10 -070010973 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010974
10975 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10976 /* DMA read watermark not used on PCIE */
10977 tp->dma_rwctrl |= 0x00180000;
10978 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070010979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010981 tp->dma_rwctrl |= 0x003f0000;
10982 else
10983 tp->dma_rwctrl |= 0x003f000f;
10984 } else {
10985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10987 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10988
Michael Chan4a29cc22006-03-19 13:21:12 -080010989 /* If the 5704 is behind the EPB bridge, we can
10990 * do the less restrictive ONE_DMA workaround for
10991 * better performance.
10992 */
10993 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10995 tp->dma_rwctrl |= 0x8000;
10996 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010997 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10998
David S. Miller59e6b432005-05-18 22:50:10 -070010999 /* Set bit 23 to enable PCIX hw bug fix */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011000 tp->dma_rwctrl |= 0x009f0000;
Michael Chan4cf78e42005-07-25 12:29:19 -070011001 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11002 /* 5780 always in PCIX mode */
11003 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070011004 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11005 /* 5714 always in PCIX mode */
11006 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011007 } else {
11008 tp->dma_rwctrl |= 0x001b000f;
11009 }
11010 }
11011
11012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11014 tp->dma_rwctrl &= 0xfffffff0;
11015
11016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11018 /* Remove this if it causes problems for some boards. */
11019 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11020
11021 /* On 5700/5701 chips, we need to set this bit.
11022 * Otherwise the chip will issue cacheline transactions
11023 * to streamable DMA memory with not all the byte
11024 * enables turned on. This is an error on several
11025 * RISC PCI controllers, in particular sparc64.
11026 *
11027 * On 5703/5704 chips, this bit has been reassigned
11028 * a different meaning. In particular, it is used
11029 * on those chips to enable a PCI-X workaround.
11030 */
11031 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11032 }
11033
11034 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11035
11036#if 0
11037 /* Unneeded, already done by tg3_get_invariants. */
11038 tg3_switch_clocks(tp);
11039#endif
11040
11041 ret = 0;
11042 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11043 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11044 goto out;
11045
David S. Miller59e6b432005-05-18 22:50:10 -070011046	/* It is best to perform the DMA test with the maximum write burst size
11047 * to expose the 5700/5701 write DMA bug.
11048 */
11049 saved_dma_rwctrl = tp->dma_rwctrl;
11050 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11051 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11052
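	/* Fill the buffer with a known pattern, DMA it to the chip and
	 * back, then verify it.  If the data comes back corrupted, drop
	 * to the most restrictive 16-byte write boundary and retry once
	 * before declaring failure.
	 */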
Linus Torvalds1da177e2005-04-16 15:20:36 -070011053 while (1) {
11054 u32 *p = buf, i;
11055
11056 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11057 p[i] = i;
11058
11059 /* Send the buffer to the chip. */
11060 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11061 if (ret) {
11062 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
11063 break;
11064 }
11065
11066#if 0
11067 /* validate data reached card RAM correctly. */
11068 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11069 u32 val;
11070 tg3_read_mem(tp, 0x2100 + (i*4), &val);
11071 if (le32_to_cpu(val) != p[i]) {
11072 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
11073 /* ret = -ENODEV here? */
11074 }
11075 p[i] = 0;
11076 }
11077#endif
11078 /* Now read it back. */
11079 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11080 if (ret) {
11081 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
11082
11083 break;
11084 }
11085
11086 /* Verify it. */
11087 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11088 if (p[i] == i)
11089 continue;
11090
David S. Miller59e6b432005-05-18 22:50:10 -070011091 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11092 DMA_RWCTRL_WRITE_BNDRY_16) {
11093 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011094 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11095 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11096 break;
11097 } else {
11098 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11099 ret = -ENODEV;
11100 goto out;
11101 }
11102 }
11103
11104 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11105 /* Success. */
11106 ret = 0;
11107 break;
11108 }
11109 }
David S. Miller59e6b432005-05-18 22:50:10 -070011110 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11111 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070011112 static struct pci_device_id dma_wait_state_chipsets[] = {
11113 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11114 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11115 { },
11116 };
11117
David S. Miller59e6b432005-05-18 22:50:10 -070011118 /* DMA test passed without adjusting DMA boundary,
Michael Chan6d1cfba2005-06-08 14:13:14 -070011119 * now look for chipsets that are known to expose the
11120 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070011121 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070011122 if (pci_dev_present(dma_wait_state_chipsets)) {
11123 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11124 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11125 }
11126 else
11127 /* Safe to use the calculated DMA boundary. */
11128 tp->dma_rwctrl = saved_dma_rwctrl;
11129
David S. Miller59e6b432005-05-18 22:50:10 -070011130 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11131 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011132
11133out:
11134 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11135out_nofree:
11136 return ret;
11137}
11138
11139static void __devinit tg3_init_link_config(struct tg3 *tp)
11140{
11141 tp->link_config.advertising =
11142 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11143 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11144 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11145 ADVERTISED_Autoneg | ADVERTISED_MII);
11146 tp->link_config.speed = SPEED_INVALID;
11147 tp->link_config.duplex = DUPLEX_INVALID;
11148 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011149 tp->link_config.active_speed = SPEED_INVALID;
11150 tp->link_config.active_duplex = DUPLEX_INVALID;
11151 tp->link_config.phy_is_low_power = 0;
11152 tp->link_config.orig_speed = SPEED_INVALID;
11153 tp->link_config.orig_duplex = DUPLEX_INVALID;
11154 tp->link_config.orig_autoneg = AUTONEG_INVALID;
11155}
11156
11157static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11158{
Michael Chanfdfec172005-07-25 12:31:48 -070011159 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11160 tp->bufmgr_config.mbuf_read_dma_low_water =
11161 DEFAULT_MB_RDMA_LOW_WATER_5705;
11162 tp->bufmgr_config.mbuf_mac_rx_low_water =
11163 DEFAULT_MB_MACRX_LOW_WATER_5705;
11164 tp->bufmgr_config.mbuf_high_water =
11165 DEFAULT_MB_HIGH_WATER_5705;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011166
Michael Chanfdfec172005-07-25 12:31:48 -070011167 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11168 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11169 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11170 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11171 tp->bufmgr_config.mbuf_high_water_jumbo =
11172 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11173 } else {
11174 tp->bufmgr_config.mbuf_read_dma_low_water =
11175 DEFAULT_MB_RDMA_LOW_WATER;
11176 tp->bufmgr_config.mbuf_mac_rx_low_water =
11177 DEFAULT_MB_MACRX_LOW_WATER;
11178 tp->bufmgr_config.mbuf_high_water =
11179 DEFAULT_MB_HIGH_WATER;
11180
11181 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11182 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11183 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11184 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11185 tp->bufmgr_config.mbuf_high_water_jumbo =
11186 DEFAULT_MB_HIGH_WATER_JUMBO;
11187 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011188
11189 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11190 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11191}
11192
11193static char * __devinit tg3_phy_string(struct tg3 *tp)
11194{
11195 switch (tp->phy_id & PHY_ID_MASK) {
11196 case PHY_ID_BCM5400: return "5400";
11197 case PHY_ID_BCM5401: return "5401";
11198 case PHY_ID_BCM5411: return "5411";
11199 case PHY_ID_BCM5701: return "5701";
11200 case PHY_ID_BCM5703: return "5703";
11201 case PHY_ID_BCM5704: return "5704";
11202 case PHY_ID_BCM5705: return "5705";
11203 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070011204 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070011205 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070011206 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080011207 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080011208 case PHY_ID_BCM5787: return "5787";
Linus Torvalds1da177e2005-04-16 15:20:36 -070011209 case PHY_ID_BCM8002: return "8002/serdes";
11210 case 0: return "serdes";
11211 default: return "unknown";
11212	}
11213}
11214
Michael Chanf9804dd2005-09-27 12:13:10 -070011215static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11216{
11217 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11218 strcpy(str, "PCI Express");
11219 return str;
11220 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11221 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11222
11223 strcpy(str, "PCIX:");
11224
11225 if ((clock_ctrl == 7) ||
11226 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11227 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11228 strcat(str, "133MHz");
11229 else if (clock_ctrl == 0)
11230 strcat(str, "33MHz");
11231 else if (clock_ctrl == 2)
11232 strcat(str, "50MHz");
11233 else if (clock_ctrl == 4)
11234 strcat(str, "66MHz");
11235 else if (clock_ctrl == 6)
11236 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070011237 } else {
11238 strcpy(str, "PCI:");
11239 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11240 strcat(str, "66MHz");
11241 else
11242 strcat(str, "33MHz");
11243 }
11244 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11245 strcat(str, ":32-bit");
11246 else
11247 strcat(str, ":64-bit");
11248 return str;
11249}
11250
Michael Chan8c2dc7e2005-12-19 16:26:02 -080011251static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011252{
11253 struct pci_dev *peer;
11254 unsigned int func, devnr = tp->pdev->devfn & ~7;
11255
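	/* Scan the up-to-eight functions in this slot for the other
	 * port of a dual-port device, i.e. any function besides us.
	 */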
11256 for (func = 0; func < 8; func++) {
11257 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11258 if (peer && peer != tp->pdev)
11259 break;
11260 pci_dev_put(peer);
11261 }
Michael Chan16fe9d72005-12-13 21:09:54 -080011262	/* 5704 can be configured in single-port mode; set peer to
11263 * tp->pdev in that case.
11264 */
11265 if (!peer) {
11266 peer = tp->pdev;
11267 return peer;
11268 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011269
11270 /*
11271 * We don't need to keep the refcount elevated; there's no way
11272 * to remove one half of this device without removing the other
11273 */
11274 pci_dev_put(peer);
11275
11276 return peer;
11277}
11278
David S. Miller15f98502005-05-18 22:49:26 -070011279static void __devinit tg3_init_coal(struct tg3 *tp)
11280{
11281 struct ethtool_coalesce *ec = &tp->coal;
11282
11283 memset(ec, 0, sizeof(*ec));
11284 ec->cmd = ETHTOOL_GCOALESCE;
11285 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11286 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11287 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11288 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11289 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11290 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11291 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11292 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11293 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11294
11295 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11296 HOSTCC_MODE_CLRTICK_TXBD)) {
11297 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11298 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11299 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11300 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11301 }
Michael Chand244c892005-07-05 14:42:33 -070011302
11303 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11304 ec->rx_coalesce_usecs_irq = 0;
11305 ec->tx_coalesce_usecs_irq = 0;
11306 ec->stats_block_coalesce_usecs = 0;
11307 }
David S. Miller15f98502005-05-18 22:49:26 -070011308}
11309
Linus Torvalds1da177e2005-04-16 15:20:36 -070011310static int __devinit tg3_init_one(struct pci_dev *pdev,
11311 const struct pci_device_id *ent)
11312{
11313 static int tg3_version_printed = 0;
11314 unsigned long tg3reg_base, tg3reg_len;
11315 struct net_device *dev;
11316 struct tg3 *tp;
Michael Chan72f2afb2006-03-06 19:28:35 -080011317 int i, err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070011318 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080011319 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011320
11321 if (tg3_version_printed++ == 0)
11322 printk(KERN_INFO "%s", version);
11323
11324 err = pci_enable_device(pdev);
11325 if (err) {
11326 printk(KERN_ERR PFX "Cannot enable PCI device, "
11327 "aborting.\n");
11328 return err;
11329 }
11330
11331 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11332 printk(KERN_ERR PFX "Cannot find proper PCI device "
11333 "base address, aborting.\n");
11334 err = -ENODEV;
11335 goto err_out_disable_pdev;
11336 }
11337
11338 err = pci_request_regions(pdev, DRV_MODULE_NAME);
11339 if (err) {
11340 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11341 "aborting.\n");
11342 goto err_out_disable_pdev;
11343 }
11344
11345 pci_set_master(pdev);
11346
11347 /* Find power-management capability. */
11348 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11349 if (pm_cap == 0) {
11350 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11351 "aborting.\n");
11352 err = -EIO;
11353 goto err_out_free_res;
11354 }
11355
Linus Torvalds1da177e2005-04-16 15:20:36 -070011356 tg3reg_base = pci_resource_start(pdev, 0);
11357 tg3reg_len = pci_resource_len(pdev, 0);
11358
11359 dev = alloc_etherdev(sizeof(*tp));
11360 if (!dev) {
11361 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11362 err = -ENOMEM;
11363 goto err_out_free_res;
11364 }
11365
11366 SET_MODULE_OWNER(dev);
11367 SET_NETDEV_DEV(dev, &pdev->dev);
11368
Linus Torvalds1da177e2005-04-16 15:20:36 -070011369#if TG3_VLAN_TAG_USED
11370 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11371 dev->vlan_rx_register = tg3_vlan_rx_register;
11372 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11373#endif
11374
11375 tp = netdev_priv(dev);
11376 tp->pdev = pdev;
11377 tp->dev = dev;
11378 tp->pm_cap = pm_cap;
11379 tp->mac_mode = TG3_DEF_MAC_MODE;
11380 tp->rx_mode = TG3_DEF_RX_MODE;
11381 tp->tx_mode = TG3_DEF_TX_MODE;
11382 tp->mi_mode = MAC_MI_MODE_BASE;
11383 if (tg3_debug > 0)
11384 tp->msg_enable = tg3_debug;
11385 else
11386 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11387
11388 /* The word/byte swap controls here control register access byte
11389 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11390 * setting below.
11391 */
11392 tp->misc_host_ctrl =
11393 MISC_HOST_CTRL_MASK_PCI_INT |
11394 MISC_HOST_CTRL_WORD_SWAP |
11395 MISC_HOST_CTRL_INDIR_ACCESS |
11396 MISC_HOST_CTRL_PCISTATE_RW;
11397
11398 /* The NONFRM (non-frame) byte/word swap controls take effect
11399 * on descriptor entries, anything which isn't packet data.
11400 *
11401 * The StrongARM chips on the board (one for tx, one for rx)
11402 * are running in big-endian mode.
11403 */
11404 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11405 GRC_MODE_WSWAP_NONFRM_DATA);
11406#ifdef __BIG_ENDIAN
11407 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11408#endif
11409 spin_lock_init(&tp->lock);
11410 spin_lock_init(&tp->tx_lock);
11411 spin_lock_init(&tp->indirect_lock);
11412 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11413
11414 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11415	if (!tp->regs) {
11416 printk(KERN_ERR PFX "Cannot map device registers, "
11417 "aborting.\n");
11418 err = -ENOMEM;
11419 goto err_out_free_dev;
11420 }
11421
11422 tg3_init_link_config(tp);
11423
Linus Torvalds1da177e2005-04-16 15:20:36 -070011424 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11425 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11426 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11427
11428 dev->open = tg3_open;
11429 dev->stop = tg3_close;
11430 dev->get_stats = tg3_get_stats;
11431 dev->set_multicast_list = tg3_set_rx_mode;
11432 dev->set_mac_address = tg3_set_mac_addr;
11433 dev->do_ioctl = tg3_ioctl;
11434 dev->tx_timeout = tg3_tx_timeout;
11435 dev->poll = tg3_poll;
11436 dev->ethtool_ops = &tg3_ethtool_ops;
11437 dev->weight = 64;
11438 dev->watchdog_timeo = TG3_TX_TIMEOUT;
11439 dev->change_mtu = tg3_change_mtu;
11440 dev->irq = pdev->irq;
11441#ifdef CONFIG_NET_POLL_CONTROLLER
11442 dev->poll_controller = tg3_poll_controller;
11443#endif
11444
11445 err = tg3_get_invariants(tp);
11446 if (err) {
11447 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11448 "aborting.\n");
11449 goto err_out_iounmap;
11450 }
11451
Michael Chan4a29cc22006-03-19 13:21:12 -080011452 /* The EPB bridge inside 5714, 5715, and 5780 and any
11453 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080011454 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11455 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11456 * do DMA address check in tg3_start_xmit().
11457 */
Michael Chan4a29cc22006-03-19 13:21:12 -080011458 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11459 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11460 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080011461 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11462#ifdef CONFIG_HIGHMEM
11463 dma_mask = DMA_64BIT_MASK;
11464#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080011465 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080011466 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11467
11468 /* Configure DMA attributes. */
11469 if (dma_mask > DMA_32BIT_MASK) {
11470 err = pci_set_dma_mask(pdev, dma_mask);
11471 if (!err) {
11472 dev->features |= NETIF_F_HIGHDMA;
11473 err = pci_set_consistent_dma_mask(pdev,
11474 persist_dma_mask);
11475 if (err < 0) {
11476 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11477 "DMA for consistent allocations\n");
11478 goto err_out_iounmap;
11479 }
11480 }
11481 }
11482 if (err || dma_mask == DMA_32BIT_MASK) {
11483 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11484 if (err) {
11485 printk(KERN_ERR PFX "No usable DMA configuration, "
11486 "aborting.\n");
11487 goto err_out_iounmap;
11488 }
11489 }
11490
Michael Chanfdfec172005-07-25 12:31:48 -070011491 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011492
11493#if TG3_TSO_SUPPORT != 0
11494 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11495 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11496 }
11497 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11499 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11500 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11501 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11502 } else {
11503 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11504 }
11505
Michael Chan4e3a7aa2006-03-20 17:47:44 -080011506 /* TSO is on by default on chips that support hardware TSO.
11507 * Firmware TSO on older chips gives lower performance, so it
11508 * is off by default, but can be enabled using ethtool.
11509 */
11510 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011511 dev->features |= NETIF_F_TSO;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011512
11513#endif
11514
11515 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11516 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11517 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11518 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11519 tp->rx_pending = 63;
11520 }
11521
Michael Chan8c2dc7e2005-12-19 16:26:02 -080011522 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11523 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11524 tp->pdev_peer = tg3_find_peer(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011525
11526 err = tg3_get_device_address(tp);
11527 if (err) {
11528 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11529 "aborting.\n");
11530 goto err_out_iounmap;
11531 }
11532
11533 /*
11534	 * Reset chip in case UNDI or EFI driver did not shut it down.
11535	 * The DMA self test will enable WDMAC and we'll see (spurious)
11536 * pending DMA on the PCI bus at that point.
11537 */
11538 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11539 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11540 pci_save_state(tp->pdev);
11541 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
Michael Chan944d9802005-05-29 14:57:48 -070011542 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011543 }
11544
11545 err = tg3_test_dma(tp);
11546 if (err) {
11547 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11548 goto err_out_iounmap;
11549 }
11550
11551 /* Tigon3 can do ipv4 only... and some chips have buggy
11552 * checksumming.
11553 */
11554 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080011555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
Michael Chan9c27dbd2006-03-20 22:28:27 -080011557 dev->features |= NETIF_F_HW_CSUM;
11558 else
11559 dev->features |= NETIF_F_IP_CSUM;
11560 dev->features |= NETIF_F_SG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011561 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11562 } else
11563 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11564
Linus Torvalds1da177e2005-04-16 15:20:36 -070011565 /* flow control autonegotiation is default behavior */
11566 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11567
David S. Miller15f98502005-05-18 22:49:26 -070011568 tg3_init_coal(tp);
11569
David S. Miller7d3f4c92005-08-06 06:35:48 -070011570 /* Now that we have fully setup the chip, save away a snapshot
11571 * of the PCI config space. We need to restore this after
11572 * GRC_MISC_CFG core clock resets and some resume events.
11573 */
11574 pci_save_state(tp->pdev);
11575
Linus Torvalds1da177e2005-04-16 15:20:36 -070011576 err = register_netdev(dev);
11577 if (err) {
11578 printk(KERN_ERR PFX "Cannot register net device, "
11579 "aborting.\n");
11580 goto err_out_iounmap;
11581 }
11582
11583 pci_set_drvdata(pdev, dev);
11584
Michael Chanf9804dd2005-09-27 12:13:10 -070011585 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
Linus Torvalds1da177e2005-04-16 15:20:36 -070011586 dev->name,
11587 tp->board_part_number,
11588 tp->pci_chip_rev_id,
11589 tg3_phy_string(tp),
Michael Chanf9804dd2005-09-27 12:13:10 -070011590 tg3_bus_string(tp, str),
Linus Torvalds1da177e2005-04-16 15:20:36 -070011591 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11592
11593 for (i = 0; i < 6; i++)
11594 printk("%2.2x%c", dev->dev_addr[i],
11595 i == 5 ? '\n' : ':');
11596
11597 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11598 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11599 "TSOcap[%d] \n",
11600 dev->name,
11601 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11602 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11603 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11604 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11605 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11606 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11607 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080011608 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11609 dev->name, tp->dma_rwctrl,
11610 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11611 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011612
Jeff Mahoney59f17412006-03-20 22:39:21 -080011613 netif_carrier_off(tp->dev);
11614
Linus Torvalds1da177e2005-04-16 15:20:36 -070011615 return 0;
11616
11617err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070011618 if (tp->regs) {
11619 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070011620 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070011621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011622
11623err_out_free_dev:
11624 free_netdev(dev);
11625
11626err_out_free_res:
11627 pci_release_regions(pdev);
11628
11629err_out_disable_pdev:
11630 pci_disable_device(pdev);
11631 pci_set_drvdata(pdev, NULL);
11632 return err;
11633}
11634
11635static void __devexit tg3_remove_one(struct pci_dev *pdev)
11636{
11637 struct net_device *dev = pci_get_drvdata(pdev);
11638
11639 if (dev) {
11640 struct tg3 *tp = netdev_priv(dev);
11641
Michael Chan7faa0062006-02-02 17:29:28 -080011642 flush_scheduled_work();
Linus Torvalds1da177e2005-04-16 15:20:36 -070011643 unregister_netdev(dev);
Michael Chan68929142005-08-09 20:17:14 -070011644 if (tp->regs) {
11645 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070011646 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070011647 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011648 free_netdev(dev);
11649 pci_release_regions(pdev);
11650 pci_disable_device(pdev);
11651 pci_set_drvdata(pdev, NULL);
11652 }
11653}
11654
11655static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11656{
11657 struct net_device *dev = pci_get_drvdata(pdev);
11658 struct tg3 *tp = netdev_priv(dev);
11659 int err;
11660
11661 if (!netif_running(dev))
11662 return 0;
11663
Michael Chan7faa0062006-02-02 17:29:28 -080011664 flush_scheduled_work();
Linus Torvalds1da177e2005-04-16 15:20:36 -070011665 tg3_netif_stop(tp);
11666
11667 del_timer_sync(&tp->timer);
11668
David S. Millerf47c11e2005-06-24 20:18:35 -070011669 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011670 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070011671 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011672
11673 netif_device_detach(dev);
11674
David S. Millerf47c11e2005-06-24 20:18:35 -070011675 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070011676 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080011677 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070011678 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011679
11680 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
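	/* Entering the low-power state failed; bring the hardware and
	 * the interface back up so the device stays usable.
	 */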
11681 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -070011682 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011683
Michael Chan6a9eba12005-12-13 21:08:58 -080011684 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Gary Zambrano8e7a22e2006-04-29 18:59:13 -070011685 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011686
11687 tp->timer.expires = jiffies + tp->timer_offset;
11688 add_timer(&tp->timer);
11689
11690 netif_device_attach(dev);
11691 tg3_netif_start(tp);
11692
David S. Millerf47c11e2005-06-24 20:18:35 -070011693 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011694 }
11695
11696 return err;
11697}
11698
11699static int tg3_resume(struct pci_dev *pdev)
11700{
11701 struct net_device *dev = pci_get_drvdata(pdev);
11702 struct tg3 *tp = netdev_priv(dev);
11703 int err;
11704
11705 if (!netif_running(dev))
11706 return 0;
11707
11708 pci_restore_state(tp->pdev);
11709
Michael Chanbc1c7562006-03-20 17:48:03 -080011710 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011711 if (err)
11712 return err;
11713
11714 netif_device_attach(dev);
11715
David S. Millerf47c11e2005-06-24 20:18:35 -070011716 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011717
Michael Chan6a9eba12005-12-13 21:08:58 -080011718 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Gary Zambrano8e7a22e2006-04-29 18:59:13 -070011719 tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011720
11721 tp->timer.expires = jiffies + tp->timer_offset;
11722 add_timer(&tp->timer);
11723
Linus Torvalds1da177e2005-04-16 15:20:36 -070011724 tg3_netif_start(tp);
11725
David S. Millerf47c11e2005-06-24 20:18:35 -070011726 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011727
11728 return 0;
11729}
11730
11731static struct pci_driver tg3_driver = {
11732 .name = DRV_MODULE_NAME,
11733 .id_table = tg3_pci_tbl,
11734 .probe = tg3_init_one,
11735 .remove = __devexit_p(tg3_remove_one),
11736 .suspend = tg3_suspend,
11737 .resume = tg3_resume
11738};
11739
11740static int __init tg3_init(void)
11741{
11742 return pci_module_init(&tg3_driver);
11743}
11744
11745static void __exit tg3_cleanup(void)
11746{
11747 pci_unregister_driver(&tg3_driver);
11748}
11749
11750module_init(tg3_init);
11751module_exit(tg3_cleanup);