/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020026#include <linux/in.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
Matt Carlson158d7ab2008-05-29 01:37:54 -070035#include <linux/phy.h>
Matt Carlsona9daf362008-05-25 23:49:44 -070036#include <linux/brcmphy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/if_vlan.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/workqueue.h>
Michael Chan61487482005-09-05 17:53:19 -070041#include <linux/prefetch.h>
Tobias Klauserf9a5f7d2005-10-29 15:09:26 +020042#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/checksum.h>
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -030045#include <net/ip.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#include <asm/system.h>
48#include <asm/io.h>
49#include <asm/byteorder.h>
50#include <asm/uaccess.h>
51
David S. Miller49b6e95f2007-03-29 01:38:42 -070052#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/idprom.h>
David S. Miller49b6e95f2007-03-29 01:38:42 -070054#include <asm/prom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#endif
56
Matt Carlson63532392008-11-03 16:49:57 -080057#define BAR_0 0
58#define BAR_2 2
59
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61#define TG3_VLAN_TAG_USED 1
62#else
63#define TG3_VLAN_TAG_USED 0
64#endif
65
#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.95"
#define DRV_MODULE_RELDATE	"November 3, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

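/* Low-level register accessors.  tg3_write32()/tg3_read32() touch the
 * memory-mapped register window directly, the tg3_ape_*() variants use
 * the separate APE register window, and the *_indirect_* helpers reach
 * the same registers through PCI config space for configurations where
 * direct MMIO access is not used.
 */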
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

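/* Shorthand macros over the access methods hung off of struct tg3
 * (tp->read32, tp->write32 and the mailbox variants).  The _f forms
 * flush the write by reading the register back; tw32_wait_f()
 * additionally waits the given number of microseconds.
 */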
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

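/* On devices with the APE enabled (TG3_FLG3_ENABLE_APE), some resources
 * are shared between the driver and the APE firmware.  The helpers below
 * arbitrate access through the TG3_APE_LOCK_REQ/GRANT registers;
 * tg3_ape_lock() polls for up to 1 ms before giving up with -EBUSY.
 */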
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

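/* Interrupt enable/disable is done through MISC_HOST_CTRL and the
 * interrupt mailbox.  tg3_cond_int() kicks the chip (via GRC local
 * ctrl or the host coalescing engine) so that any pending status
 * block update is delivered as an interrupt.
 */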
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

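/* PHY register access goes through the MAC's MII communication register
 * (MAC_MI_COM).  Auto-polling is temporarily turned off around each
 * access, and each operation is polled for completion for up to
 * PHY_BUSY_LOOPS iterations.
 */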
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

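/* Program the MAC side of the MAC/PHY interface (LED modes and RGMII
 * in-band signalling) on 5785 devices according to the PHY that phylib
 * found on the MDIO bus.
 */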
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~MAC_PHYCFG1_RGMII_INT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

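/* Allocate and register an MDIO bus with phylib (only when
 * TG3_FLG3_USE_PHYLIB is set), then apply PHY-specific dev_flags and
 * interface modes for the PHYs this driver knows about.
 */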
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

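/* Translate the driver's TG3_FLOW_CTRL_TX/RX flags into MII pause
 * advertisement bits: the _1000T variant produces the copper
 * (ADVERTISE_PAUSE_*) bits, the _1000X variant the 1000BASE-X
 * (ADVERTISE_1000X*) bits.
 */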
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

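/* Given the local and link-partner pause advertisements, resolve which
 * directions of flow control (TX and/or RX pause) were actually
 * negotiated, following the standard pause resolution rules.
 */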
static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

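/* phylib link-change callback.  Mirrors the PHY's negotiated speed,
 * duplex and pause settings into the MAC mode registers and emits a
 * link report when anything relevant has changed.
 */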
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

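/* Attach the MAC to its PHY through phylib and restrict the advertised
 * features to what the MAC/PHY interface mode can actually support.
 */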
1388static int tg3_phy_init(struct tg3 *tp)
1389{
1390 struct phy_device *phydev;
1391
1392 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1393 return 0;
1394
1395 /* Bring the PHY back to a known state. */
1396 tg3_bmcr_reset(tp);
1397
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001398 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001399
1400 /* Attach the MAC to the PHY. */
Kay Sieversfb28ad32008-11-10 13:55:14 -08001401 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
Matt Carlsona9daf362008-05-25 23:49:44 -07001402 phydev->dev_flags, phydev->interface);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001403 if (IS_ERR(phydev)) {
1404 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1405 return PTR_ERR(phydev);
1406 }
1407
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001408 /* Mask with MAC supported features. */
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001409 switch (phydev->interface) {
1410 case PHY_INTERFACE_MODE_GMII:
1411 case PHY_INTERFACE_MODE_RGMII:
1412 phydev->supported &= (PHY_GBIT_FEATURES |
1413 SUPPORTED_Pause |
1414 SUPPORTED_Asym_Pause);
1415 break;
1416 case PHY_INTERFACE_MODE_MII:
1417 phydev->supported &= (PHY_BASIC_FEATURES |
1418 SUPPORTED_Pause |
1419 SUPPORTED_Asym_Pause);
1420 break;
1421 default:
1422 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1423 return -EINVAL;
1424 }
1425
1426 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001427
1428 phydev->advertising = phydev->supported;
1429
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001430 return 0;
1431}
1432
1433static void tg3_phy_start(struct tg3 *tp)
1434{
1435 struct phy_device *phydev;
1436
1437 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1438 return;
1439
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001440 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001441
1442 if (tp->link_config.phy_is_low_power) {
1443 tp->link_config.phy_is_low_power = 0;
1444 phydev->speed = tp->link_config.orig_speed;
1445 phydev->duplex = tp->link_config.orig_duplex;
1446 phydev->autoneg = tp->link_config.orig_autoneg;
1447 phydev->advertising = tp->link_config.orig_advertising;
1448 }
1449
1450 phy_start(phydev);
1451
1452 phy_start_aneg(phydev);
1453}
1454
1455static void tg3_phy_stop(struct tg3 *tp)
1456{
1457 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1458 return;
1459
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001460 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001461}
1462
1463static void tg3_phy_fini(struct tg3 *tp)
1464{
1465 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001466 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001467 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1468 }
1469}
1470
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001471static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1472{
1473 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1474 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1475}
1476
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001477static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1478{
1479 u32 phy;
1480
1481 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1482 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1483 return;
1484
1485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1486 u32 ephy;
1487
1488 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1489 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1490 ephy | MII_TG3_EPHY_SHADOW_EN);
1491 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1492 if (enable)
1493 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1494 else
1495 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1496 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1497 }
1498 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1499 }
1500 } else {
1501 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1502 MII_TG3_AUXCTL_SHDWSEL_MISC;
1503 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1504 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1505 if (enable)
1506 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1507 else
1508 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1509 phy |= MII_TG3_AUXCTL_MISC_WREN;
1510 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1511 }
1512 }
1513}
1514
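/* Turn on the PHY's ethernet@wirespeed feature unless it has been
 * disabled for this board (TG3_FLG2_NO_ETH_WIRE_SPEED).
 */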
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515static void tg3_phy_set_wirespeed(struct tg3 *tp)
1516{
1517 u32 val;
1518
1519 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1520 return;
1521
1522 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1523 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1524 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1525 (val | (1 << 15) | (1 << 4)));
1526}
1527
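/* Program PHY DSP coefficients from the one-time-programmable value
 * saved in tp->phy_otp.  Does nothing if no OTP data was found.
 */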
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001528static void tg3_phy_apply_otp(struct tg3 *tp)
1529{
1530 u32 otp, phy;
1531
1532 if (!tp->phy_otp)
1533 return;
1534
1535 otp = tp->phy_otp;
1536
1537 /* Enable SM_DSP clock and tx 6dB coding. */
1538 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1539 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1540 MII_TG3_AUXCTL_ACTL_TX_6DB;
1541 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1542
1543 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1544 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1545 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1546
1547 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1548 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1549 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1550
1551 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1552 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1553 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1554
1555 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1556 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1557
1558 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1559 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1560
1561 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1562 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1563 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1564
1565 /* Turn off SM_DSP clock. */
1566 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1567 MII_TG3_AUXCTL_ACTL_TX_6DB;
1568 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1569}
1570
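/* Poll PHY register 0x16 until the DSP macro busy bit (0x1000)
 * clears.  Returns -EBUSY on timeout.
 */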
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571static int tg3_wait_macro_done(struct tg3 *tp)
1572{
1573 int limit = 100;
1574
1575 while (limit--) {
1576 u32 tmp32;
1577
1578 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1579 if ((tmp32 & 0x1000) == 0)
1580 break;
1581 }
1582 }
1583	if (limit < 0)
1584 return -EBUSY;
1585
1586 return 0;
1587}
1588
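/* Write a test pattern to each of the four DSP channels and verify it
 * reads back correctly.  Macro handshake timeouts request another PHY
 * reset via *resetp; a data mismatch just fails with -EBUSY.
 */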
1589static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1590{
1591 static const u32 test_pat[4][6] = {
1592 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1593 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1594 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1595 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1596 };
1597 int chan;
1598
1599 for (chan = 0; chan < 4; chan++) {
1600 int i;
1601
1602 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1603 (chan * 0x2000) | 0x0200);
1604 tg3_writephy(tp, 0x16, 0x0002);
1605
1606 for (i = 0; i < 6; i++)
1607 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1608 test_pat[chan][i]);
1609
1610 tg3_writephy(tp, 0x16, 0x0202);
1611 if (tg3_wait_macro_done(tp)) {
1612 *resetp = 1;
1613 return -EBUSY;
1614 }
1615
1616 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1617 (chan * 0x2000) | 0x0200);
1618 tg3_writephy(tp, 0x16, 0x0082);
1619 if (tg3_wait_macro_done(tp)) {
1620 *resetp = 1;
1621 return -EBUSY;
1622 }
1623
1624 tg3_writephy(tp, 0x16, 0x0802);
1625 if (tg3_wait_macro_done(tp)) {
1626 *resetp = 1;
1627 return -EBUSY;
1628 }
1629
1630 for (i = 0; i < 6; i += 2) {
1631 u32 low, high;
1632
1633 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1634 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1635 tg3_wait_macro_done(tp)) {
1636 *resetp = 1;
1637 return -EBUSY;
1638 }
1639 low &= 0x7fff;
1640 high &= 0x000f;
1641 if (low != test_pat[chan][i] ||
1642 high != test_pat[chan][i+1]) {
1643 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1644 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1645 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1646
1647 return -EBUSY;
1648 }
1649 }
1650 }
1651
1652 return 0;
1653}
1654
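/* Clear the test pattern from all four DSP channels. */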
1655static int tg3_phy_reset_chanpat(struct tg3 *tp)
1656{
1657 int chan;
1658
1659 for (chan = 0; chan < 4; chan++) {
1660 int i;
1661
1662 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1663 (chan * 0x2000) | 0x0200);
1664 tg3_writephy(tp, 0x16, 0x0002);
1665 for (i = 0; i < 6; i++)
1666 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1667 tg3_writephy(tp, 0x16, 0x0202);
1668 if (tg3_wait_macro_done(tp))
1669 return -EBUSY;
1670 }
1671
1672 return 0;
1673}
1674
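/* PHY reset workaround for 5703/5704/5705: force a 1000Mbps
 * full-duplex master link, run the DSP test-pattern check, then
 * restore the original MII_TG3_CTRL and extended control settings.
 */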
1675static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1676{
1677 u32 reg32, phy9_orig;
1678 int retries, do_phy_reset, err;
1679
1680 retries = 10;
1681 do_phy_reset = 1;
1682 do {
1683 if (do_phy_reset) {
1684 err = tg3_bmcr_reset(tp);
1685 if (err)
1686 return err;
1687 do_phy_reset = 0;
1688 }
1689
1690 /* Disable transmitter and interrupt. */
1691 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1692 continue;
1693
1694 reg32 |= 0x3000;
1695 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1696
1697		/* Set full-duplex, 1000 Mbps. */
1698 tg3_writephy(tp, MII_BMCR,
1699 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1700
1701 /* Set to master mode. */
1702 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1703 continue;
1704
1705 tg3_writephy(tp, MII_TG3_CTRL,
1706 (MII_TG3_CTRL_AS_MASTER |
1707 MII_TG3_CTRL_ENABLE_AS_MASTER));
1708
1709 /* Enable SM_DSP_CLOCK and 6dB. */
1710 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1711
1712 /* Block the PHY control access. */
1713 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1714 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1715
1716 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1717 if (!err)
1718 break;
1719 } while (--retries);
1720
1721 err = tg3_phy_reset_chanpat(tp);
1722 if (err)
1723 return err;
1724
1725 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1727
1728 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1729 tg3_writephy(tp, 0x16, 0x0000);
1730
1731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1732 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1733 /* Set Extended packet length bit for jumbo frames */
1734 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1735	} else {
1737 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1738 }
1739
1740 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1741
1742 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1743 reg32 &= ~0x3000;
1744 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1745 } else if (!err)
1746 err = -EBUSY;
1747
1748 return err;
1749}
1750
1751/* Reset the tigon3 PHY and reapply the chip-specific PHY
1752 * workarounds needed after a reset.
1753 */
1754static int tg3_phy_reset(struct tg3 *tp)
1755{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001756 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 u32 phy_status;
1758 int err;
1759
Michael Chan60189dd2006-12-17 17:08:07 -08001760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1761 u32 val;
1762
1763 val = tr32(GRC_MISC_CFG);
1764 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1765 udelay(40);
1766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1768 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1769 if (err != 0)
1770 return -EBUSY;
1771
Michael Chanc8e1e822006-04-29 18:55:17 -07001772 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1773 netif_carrier_off(tp->dev);
1774 tg3_link_report(tp);
1775 }
1776
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1780 err = tg3_phy_reset_5703_4_5(tp);
1781 if (err)
1782 return err;
1783 goto out;
1784 }
1785
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001786 cpmuctrl = 0;
1787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1788 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1789 cpmuctrl = tr32(TG3_CPMU_CTRL);
1790 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1791 tw32(TG3_CPMU_CTRL,
1792 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1793 }
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 err = tg3_bmcr_reset(tp);
1796 if (err)
1797 return err;
1798
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001799 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1800 u32 phy;
1801
1802 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1803 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1804
1805 tw32(TG3_CPMU_CTRL, cpmuctrl);
1806 }
1807
Matt Carlsonbcb37f62008-11-03 16:52:09 -08001808 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1809 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001810 u32 val;
1811
1812 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1813 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1814 CPMU_LSPD_1000MB_MACCLK_12_5) {
1815 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1816 udelay(40);
1817 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1818 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001819
1820 /* Disable GPHY autopowerdown. */
1821 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1822 MII_TG3_MISC_SHDW_WREN |
1823 MII_TG3_MISC_SHDW_APD_SEL |
1824 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001825 }
1826
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001827 tg3_phy_apply_otp(tp);
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829out:
1830 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1831 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1832 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1834 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1835 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1836 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1837 }
1838 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1839 tg3_writephy(tp, 0x1c, 0x8d68);
1840 tg3_writephy(tp, 0x1c, 0x8d68);
1841 }
1842 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1843 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1845 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1846 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1847 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1848 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1849 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1850 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1851	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1853 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1854 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001855 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1856 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1857 tg3_writephy(tp, MII_TG3_TEST1,
1858 MII_TG3_TEST1_TRIM_EN | 0x4);
1859 } else
1860 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001861 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1862 }
1863	/* Set Extended packet length bit (bit 14) on all chips
1864	 * that support jumbo frames. */
1865 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1866 /* Cannot do read-modify-write on 5401 */
1867 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001868 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 u32 phy_reg;
1870
1871 /* Set bit 14 with read-modify-write to preserve other bits */
1872 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1873 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1874 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1875 }
1876
1877 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1878 * jumbo frames transmission.
1879 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001880 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 u32 phy_reg;
1882
1883 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1884 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1885 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1886 }
1887
Michael Chan715116a2006-09-27 16:09:25 -07001888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001889 /* adjust output voltage */
1890 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001891 }
1892
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001893 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 tg3_phy_set_wirespeed(tp);
1895 return 0;
1896}
1897
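/* Drive the GRC local-control GPIOs that gate auxiliary power,
 * taking the WOL/ASF needs of a 5704/5714 peer port into account.
 */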
1898static void tg3_frob_aux_power(struct tg3 *tp)
1899{
1900 struct tg3 *tp_peer = tp;
1901
Michael Chan9d26e212006-12-07 00:21:14 -08001902 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 return;
1904
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001905 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1906 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1907 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001909 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001910 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001911 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001912 tp_peer = tp;
1913 else
1914 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
1917 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001918 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1919 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1920 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001923 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1924 (GRC_LCLCTRL_GPIO_OE0 |
1925 GRC_LCLCTRL_GPIO_OE1 |
1926 GRC_LCLCTRL_GPIO_OE2 |
1927 GRC_LCLCTRL_GPIO_OUTPUT0 |
1928 GRC_LCLCTRL_GPIO_OUTPUT1),
1929 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001930 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1931 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1932 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1933 GRC_LCLCTRL_GPIO_OE1 |
1934 GRC_LCLCTRL_GPIO_OE2 |
1935 GRC_LCLCTRL_GPIO_OUTPUT0 |
1936 GRC_LCLCTRL_GPIO_OUTPUT1 |
1937 tp->grc_local_ctrl;
1938 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1939
1940 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1941 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1942
1943 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1944 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 } else {
1946 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001947 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948
1949 if (tp_peer != tp &&
1950 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1951 return;
1952
Michael Chandc56b7d2005-12-19 16:26:28 -08001953 /* Workaround to prevent overdrawing Amps. */
1954 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1955 ASIC_REV_5714) {
1956 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001957 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1958 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001959 }
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 /* On 5753 and variants, GPIO2 cannot be used. */
1962 no_gpio2 = tp->nic_sram_data_cfg &
1963 NIC_SRAM_DATA_CFG_NO_GPIO2;
1964
Michael Chandc56b7d2005-12-19 16:26:28 -08001965 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 GRC_LCLCTRL_GPIO_OE1 |
1967 GRC_LCLCTRL_GPIO_OE2 |
1968 GRC_LCLCTRL_GPIO_OUTPUT1 |
1969 GRC_LCLCTRL_GPIO_OUTPUT2;
1970 if (no_gpio2) {
1971 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1972 GRC_LCLCTRL_GPIO_OUTPUT2);
1973 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001974 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1975 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1978
Michael Chanb401e9e2005-12-19 16:27:04 -08001979 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1980 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
1982 if (!no_gpio2) {
1983 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001984 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1985 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 }
1987 }
1988 } else {
1989 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1990 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1991 if (tp_peer != tp &&
1992 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1993 return;
1994
Michael Chanb401e9e2005-12-19 16:27:04 -08001995 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1996 (GRC_LCLCTRL_GPIO_OE1 |
1997 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
Michael Chanb401e9e2005-12-19 16:27:04 -08001999 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2000 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Michael Chanb401e9e2005-12-19 16:27:04 -08002002 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2003 (GRC_LCLCTRL_GPIO_OE1 |
2004 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 }
2006 }
2007}
2008
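/* Return nonzero when MAC_MODE_LINK_POLARITY should be set for this
 * combination of LED mode, PHY type and link speed (5700-class parts).
 */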
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002009static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2010{
2011 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2012 return 1;
2013 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2014 if (speed != SPEED_10)
2015 return 1;
2016 } else if (speed == SPEED_10)
2017 return 1;
2018
2019 return 0;
2020}
2021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022static int tg3_setup_phy(struct tg3 *, int);
2023
2024#define RESET_KIND_SHUTDOWN 0
2025#define RESET_KIND_INIT 1
2026#define RESET_KIND_SUSPEND 2
2027
2028static void tg3_write_sig_post_reset(struct tg3 *, int);
2029static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08002030static int tg3_nvram_lock(struct tg3 *);
2031static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
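/* Power down or isolate the PHY as far as this chip allows: serdes
 * parts only get their soft-reset/isolate bits set, the 5906 enters
 * EPHY IDDQ, and chips with PHY power bugs skip the final BMCR_PDOWN.
 */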
Matt Carlson0a459aa2008-11-03 16:54:15 -08002033static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
Michael Chan15c3b692006-03-22 01:06:52 -08002034{
Matt Carlsonce057f02007-11-12 21:08:03 -08002035 u32 val;
2036
Michael Chan51297242007-02-13 12:17:57 -08002037 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2039 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2040 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2041
2042 sg_dig_ctrl |=
2043 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2044 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2045 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2046 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002047 return;
Michael Chan51297242007-02-13 12:17:57 -08002048 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002049
Michael Chan60189dd2006-12-17 17:08:07 -08002050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08002051 tg3_bmcr_reset(tp);
2052 val = tr32(GRC_MISC_CFG);
2053 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2054 udelay(40);
2055 return;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002056 } else if (do_low_power) {
Michael Chan715116a2006-09-27 16:09:25 -07002057 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2058 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002059
2060 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2061 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2062 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2063 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2064 MII_TG3_AUXCTL_PCTL_VREG_11V);
Michael Chan715116a2006-09-27 16:09:25 -07002065 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002066
Michael Chan15c3b692006-03-22 01:06:52 -08002067 /* The PHY should not be powered down on some chips because
2068 * of bugs.
2069 */
2070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2071 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2072 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2073 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2074 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002075
Matt Carlsonbcb37f62008-11-03 16:52:09 -08002076 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2077 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002078 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2079 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2080 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2081 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2082 }
2083
Michael Chan15c3b692006-03-22 01:06:52 -08002084 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2085}
2086
Matt Carlson3f007892008-11-03 16:51:36 -08002087/* tp->lock is held. */
2088static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2089{
2090 u32 addr_high, addr_low;
2091 int i;
2092
2093 addr_high = ((tp->dev->dev_addr[0] << 8) |
2094 tp->dev->dev_addr[1]);
2095 addr_low = ((tp->dev->dev_addr[2] << 24) |
2096 (tp->dev->dev_addr[3] << 16) |
2097 (tp->dev->dev_addr[4] << 8) |
2098 (tp->dev->dev_addr[5] << 0));
2099 for (i = 0; i < 4; i++) {
2100 if (i == 1 && skip_mac_1)
2101 continue;
2102 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2103 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2104 }
2105
2106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2108 for (i = 0; i < 12; i++) {
2109 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2110 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2111 }
2112 }
2113
2114 addr_high = (tp->dev->dev_addr[0] +
2115 tp->dev->dev_addr[1] +
2116 tp->dev->dev_addr[2] +
2117 tp->dev->dev_addr[3] +
2118 tp->dev->dev_addr[4] +
2119 tp->dev->dev_addr[5]) &
2120 TX_BACKOFF_SEED_MASK;
2121 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2122}
2123
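/* Move the device to the requested PCI power state, programming
 * wake-on-LAN, clocks, GPIOs and PHY power to match.
 */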
Michael Chanbc1c7562006-03-20 17:48:03 -08002124static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125{
2126 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002127 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
2129 /* Make sure register accesses (indirect or otherwise)
2130 * will function correctly.
2131 */
2132 pci_write_config_dword(tp->pdev,
2133 TG3PCI_MISC_HOST_CTRL,
2134 tp->misc_host_ctrl);
2135
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002137 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002138 pci_enable_wake(tp->pdev, state, false);
2139 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002140
Michael Chan9d26e212006-12-07 00:21:14 -08002141 /* Switch out of Vaux if it is a NIC */
2142 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002143 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
2145 return 0;
2146
Michael Chanbc1c7562006-03-20 17:48:03 -08002147 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002148 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002149 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 break;
2151
2152 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002153 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2154 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002156 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2158 tw32(TG3PCI_MISC_HOST_CTRL,
2159 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2160
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002161 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2162 device_may_wakeup(&tp->pdev->dev) &&
2163 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2164
Matt Carlsondd477002008-05-25 23:45:58 -07002165 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002166 do_low_power = false;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002167 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2168 !tp->link_config.phy_is_low_power) {
2169 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002170 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002171
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002172 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002173
2174 tp->link_config.phy_is_low_power = 1;
2175
2176 tp->link_config.orig_speed = phydev->speed;
2177 tp->link_config.orig_duplex = phydev->duplex;
2178 tp->link_config.orig_autoneg = phydev->autoneg;
2179 tp->link_config.orig_advertising = phydev->advertising;
2180
2181 advertising = ADVERTISED_TP |
2182 ADVERTISED_Pause |
2183 ADVERTISED_Autoneg |
2184 ADVERTISED_10baseT_Half;
2185
2186 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002187 device_should_wake) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002188 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2189 advertising |=
2190 ADVERTISED_100baseT_Half |
2191 ADVERTISED_100baseT_Full |
2192 ADVERTISED_10baseT_Full;
2193 else
2194 advertising |= ADVERTISED_10baseT_Full;
2195 }
2196
2197 phydev->advertising = advertising;
2198
2199 phy_start_aneg(phydev);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002200
2201 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2202 if (phyid != TG3_PHY_ID_BCMAC131) {
2203 phyid &= TG3_PHY_OUI_MASK;
2204				if (phyid == TG3_PHY_OUI_1 ||
2205				    phyid == TG3_PHY_OUI_2 ||
2206 phyid == TG3_PHY_OUI_3)
2207 do_low_power = true;
2208 }
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002209 }
Matt Carlsondd477002008-05-25 23:45:58 -07002210 } else {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002211 do_low_power = false;
2212
Matt Carlsondd477002008-05-25 23:45:58 -07002213 if (tp->link_config.phy_is_low_power == 0) {
2214 tp->link_config.phy_is_low_power = 1;
2215 tp->link_config.orig_speed = tp->link_config.speed;
2216 tp->link_config.orig_duplex = tp->link_config.duplex;
2217 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2218 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Matt Carlsondd477002008-05-25 23:45:58 -07002220 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2221 tp->link_config.speed = SPEED_10;
2222 tp->link_config.duplex = DUPLEX_HALF;
2223 tp->link_config.autoneg = AUTONEG_ENABLE;
2224 tg3_setup_phy(tp, 0);
2225 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 }
2227
Matt Carlson3f007892008-11-03 16:51:36 -08002228 __tg3_set_mac_addr(tp, 0);
2229
Michael Chanb5d37722006-09-27 16:06:21 -07002230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2231 u32 val;
2232
2233 val = tr32(GRC_VCPU_EXT_CTRL);
2234 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2235 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002236 int i;
2237 u32 val;
2238
2239 for (i = 0; i < 200; i++) {
2240 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2241 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2242 break;
2243 msleep(1);
2244 }
2245 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002246 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2247 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2248 WOL_DRV_STATE_SHUTDOWN |
2249 WOL_DRV_WOL |
2250 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002251
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002252 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 u32 mac_mode;
2254
2255 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002256 if (do_low_power) {
Matt Carlsondd477002008-05-25 23:45:58 -07002257 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2258 udelay(40);
2259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Michael Chan3f7045c2006-09-27 16:02:29 -07002261 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2262 mac_mode = MAC_MODE_PORT_MODE_GMII;
2263 else
2264 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002266 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2267 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2268 ASIC_REV_5700) {
2269 u32 speed = (tp->tg3_flags &
2270 TG3_FLAG_WOL_SPEED_100MB) ?
2271 SPEED_100 : SPEED_10;
2272 if (tg3_5700_link_polarity(tp, speed))
2273 mac_mode |= MAC_MODE_LINK_POLARITY;
2274 else
2275 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2276 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 } else {
2278 mac_mode = MAC_MODE_PORT_MODE_TBI;
2279 }
2280
John W. Linvillecbf46852005-04-21 17:01:29 -07002281 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 tw32(MAC_LED_CTRL, tp->led_ctrl);
2283
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002284 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2285 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2286 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2287 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2288 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2289 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
Matt Carlson3bda1252008-08-15 14:08:22 -07002291 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2292 mac_mode |= tp->mac_mode &
2293 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2294 if (mac_mode & MAC_MODE_APE_TX_EN)
2295 mac_mode |= MAC_MODE_TDE_ENABLE;
2296 }
2297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 tw32_f(MAC_MODE, mac_mode);
2299 udelay(100);
2300
2301 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2302 udelay(10);
2303 }
2304
2305 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2306 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2308 u32 base_val;
2309
2310 base_val = tp->pci_clock_ctrl;
2311 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2312 CLOCK_CTRL_TXCLK_DISABLE);
2313
Michael Chanb401e9e2005-12-19 16:27:04 -08002314 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2315 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002316 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002317 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002318 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002319 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002320 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2322 u32 newbits1, newbits2;
2323
2324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2326 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2327 CLOCK_CTRL_TXCLK_DISABLE |
2328 CLOCK_CTRL_ALTCLK);
2329 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2330 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2331 newbits1 = CLOCK_CTRL_625_CORE;
2332 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2333 } else {
2334 newbits1 = CLOCK_CTRL_ALTCLK;
2335 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2336 }
2337
Michael Chanb401e9e2005-12-19 16:27:04 -08002338 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2339 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340
Michael Chanb401e9e2005-12-19 16:27:04 -08002341 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2342 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
2344 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2345 u32 newbits3;
2346
2347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2349 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2350 CLOCK_CTRL_TXCLK_DISABLE |
2351 CLOCK_CTRL_44MHZ_CORE);
2352 } else {
2353 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2354 }
2355
Michael Chanb401e9e2005-12-19 16:27:04 -08002356 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2357 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 }
2359 }
2360
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002361 if (!(device_should_wake) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002362 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2363 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson0a459aa2008-11-03 16:54:15 -08002364 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08002365
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 tg3_frob_aux_power(tp);
2367
2368 /* Workaround for unstable PLL clock */
2369 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2370 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2371 u32 val = tr32(0x7d00);
2372
2373 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2374 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002375 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002376 int err;
2377
2378 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002380 if (!err)
2381 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002382 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 }
2384
Michael Chanbbadf502006-04-06 21:46:34 -07002385 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2386
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002387 if (device_should_wake)
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002388 pci_enable_wake(tp->pdev, state, true);
2389
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002391 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 return 0;
2394}
2395
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2397{
2398 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2399 case MII_TG3_AUX_STAT_10HALF:
2400 *speed = SPEED_10;
2401 *duplex = DUPLEX_HALF;
2402 break;
2403
2404 case MII_TG3_AUX_STAT_10FULL:
2405 *speed = SPEED_10;
2406 *duplex = DUPLEX_FULL;
2407 break;
2408
2409 case MII_TG3_AUX_STAT_100HALF:
2410 *speed = SPEED_100;
2411 *duplex = DUPLEX_HALF;
2412 break;
2413
2414 case MII_TG3_AUX_STAT_100FULL:
2415 *speed = SPEED_100;
2416 *duplex = DUPLEX_FULL;
2417 break;
2418
2419 case MII_TG3_AUX_STAT_1000HALF:
2420 *speed = SPEED_1000;
2421 *duplex = DUPLEX_HALF;
2422 break;
2423
2424 case MII_TG3_AUX_STAT_1000FULL:
2425 *speed = SPEED_1000;
2426 *duplex = DUPLEX_FULL;
2427 break;
2428
2429 default:
Michael Chan715116a2006-09-27 16:09:25 -07002430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2431 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2432 SPEED_10;
2433 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2434 DUPLEX_HALF;
2435 break;
2436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 *speed = SPEED_INVALID;
2438 *duplex = DUPLEX_INVALID;
2439 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441}
2442
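/* Program the advertisement registers and restart autonegotiation, or
 * force the requested speed/duplex when autoneg is disabled.
 */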
2443static void tg3_phy_copper_begin(struct tg3 *tp)
2444{
2445 u32 new_adv;
2446 int i;
2447
2448 if (tp->link_config.phy_is_low_power) {
2449 /* Entering low power mode. Disable gigabit and
2450 * 100baseT advertisements.
2451 */
2452 tg3_writephy(tp, MII_TG3_CTRL, 0);
2453
2454 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2455 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2456 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2457 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2458
2459 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2460 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2462 tp->link_config.advertising &=
2463 ~(ADVERTISED_1000baseT_Half |
2464 ADVERTISED_1000baseT_Full);
2465
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002466 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2468 new_adv |= ADVERTISE_10HALF;
2469 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2470 new_adv |= ADVERTISE_10FULL;
2471 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2472 new_adv |= ADVERTISE_100HALF;
2473 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2474 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002475
2476 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2477
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2479
2480 if (tp->link_config.advertising &
2481 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2482 new_adv = 0;
2483 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2484 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2485 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2486 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2487 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2488 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2489 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2490 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2491 MII_TG3_CTRL_ENABLE_AS_MASTER);
2492 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2493 } else {
2494 tg3_writephy(tp, MII_TG3_CTRL, 0);
2495 }
2496 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002497 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2498 new_adv |= ADVERTISE_CSMA;
2499
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 /* Asking for a specific link mode. */
2501 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2503
2504 if (tp->link_config.duplex == DUPLEX_FULL)
2505 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2506 else
2507 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2508 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2509 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2510 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2511 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 if (tp->link_config.speed == SPEED_100) {
2514 if (tp->link_config.duplex == DUPLEX_FULL)
2515 new_adv |= ADVERTISE_100FULL;
2516 else
2517 new_adv |= ADVERTISE_100HALF;
2518 } else {
2519 if (tp->link_config.duplex == DUPLEX_FULL)
2520 new_adv |= ADVERTISE_10FULL;
2521 else
2522 new_adv |= ADVERTISE_10HALF;
2523 }
2524 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002525
2526 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002528
2529 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 }
2531
2532 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2533 tp->link_config.speed != SPEED_INVALID) {
2534 u32 bmcr, orig_bmcr;
2535
2536 tp->link_config.active_speed = tp->link_config.speed;
2537 tp->link_config.active_duplex = tp->link_config.duplex;
2538
2539 bmcr = 0;
2540 switch (tp->link_config.speed) {
2541 default:
2542 case SPEED_10:
2543 break;
2544
2545 case SPEED_100:
2546 bmcr |= BMCR_SPEED100;
2547 break;
2548
2549 case SPEED_1000:
2550 bmcr |= TG3_BMCR_SPEED1000;
2551 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553
2554 if (tp->link_config.duplex == DUPLEX_FULL)
2555 bmcr |= BMCR_FULLDPLX;
2556
2557 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2558 (bmcr != orig_bmcr)) {
2559 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2560 for (i = 0; i < 1500; i++) {
2561 u32 tmp;
2562
2563 udelay(10);
2564 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2565 tg3_readphy(tp, MII_BMSR, &tmp))
2566 continue;
2567 if (!(tmp & BMSR_LSTATUS)) {
2568 udelay(40);
2569 break;
2570 }
2571 }
2572 tg3_writephy(tp, MII_BMCR, bmcr);
2573 udelay(40);
2574 }
2575 } else {
2576 tg3_writephy(tp, MII_BMCR,
2577 BMCR_ANENABLE | BMCR_ANRESTART);
2578 }
2579}
2580
2581static int tg3_init_5401phy_dsp(struct tg3 *tp)
2582{
2583 int err;
2584
2585 /* Turn off tap power management. */
2586 /* Set Extended packet length bit */
2587 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2588
2589 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2590 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2591
2592 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2593 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2594
2595 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2596 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2597
2598 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2599 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2600
2601 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2602 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2603
2604 udelay(40);
2605
2606 return err;
2607}
2608
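/* Return 1 if the PHY's advertisement registers cover every mode
 * requested in @mask, 0 otherwise.
 */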
Michael Chan3600d912006-12-07 00:21:48 -08002609static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610{
Michael Chan3600d912006-12-07 00:21:48 -08002611 u32 adv_reg, all_mask = 0;
2612
2613 if (mask & ADVERTISED_10baseT_Half)
2614 all_mask |= ADVERTISE_10HALF;
2615 if (mask & ADVERTISED_10baseT_Full)
2616 all_mask |= ADVERTISE_10FULL;
2617 if (mask & ADVERTISED_100baseT_Half)
2618 all_mask |= ADVERTISE_100HALF;
2619 if (mask & ADVERTISED_100baseT_Full)
2620 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621
2622 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2623 return 0;
2624
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 if ((adv_reg & all_mask) != all_mask)
2626 return 0;
2627 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2628 u32 tg3_ctrl;
2629
Michael Chan3600d912006-12-07 00:21:48 -08002630 all_mask = 0;
2631 if (mask & ADVERTISED_1000baseT_Half)
2632 all_mask |= ADVERTISE_1000HALF;
2633 if (mask & ADVERTISED_1000baseT_Full)
2634 all_mask |= ADVERTISE_1000FULL;
2635
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2637 return 0;
2638
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 if ((tg3_ctrl & all_mask) != all_mask)
2640 return 0;
2641 }
2642 return 1;
2643}
2644
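/* Check that the advertised pause bits match the requested flow
 * control settings; if not, reprogram MII_ADVERTISE so the next
 * negotiation picks them up.
 */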
Matt Carlsonef167e22007-12-20 20:10:01 -08002645static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2646{
2647 u32 curadv, reqadv;
2648
2649 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2650 return 1;
2651
2652 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2653 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2654
2655 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2656 if (curadv != reqadv)
2657 return 0;
2658
2659 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2660 tg3_readphy(tp, MII_LPA, rmtadv);
2661 } else {
2662 /* Reprogram the advertisement register, even if it
2663 * does not affect the current link. If the link
2664 * gets renegotiated in the future, we can save an
2665 * additional renegotiation cycle by advertising
2666 * it correctly in the first place.
2667 */
2668 if (curadv != reqadv) {
2669 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2670 ADVERTISE_PAUSE_ASYM);
2671 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2672 }
2673 }
2674
2675 return 1;
2676}
2677
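/* Bring up (or re-check) the link on a copper PHY: apply per-chip
 * workarounds, read the negotiated speed/duplex, and program the MAC
 * mode and flow control to match.
 */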
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2679{
2680 int current_link_up;
2681 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002682 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683 u16 current_speed;
2684 u8 current_duplex;
2685 int i, err;
2686
2687 tw32(MAC_EVENT, 0);
2688
2689 tw32_f(MAC_STATUS,
2690 (MAC_STATUS_SYNC_CHANGED |
2691 MAC_STATUS_CFG_CHANGED |
2692 MAC_STATUS_MI_COMPLETION |
2693 MAC_STATUS_LNKSTATE_CHANGED));
2694 udelay(40);
2695
Matt Carlson8ef21422008-05-02 16:47:53 -07002696 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2697 tw32_f(MAC_MI_MODE,
2698 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2699 udelay(80);
2700 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701
2702 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2703
2704 /* Some third-party PHYs need to be reset on link going
2705 * down.
2706 */
2707 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2709 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2710 netif_carrier_ok(tp->dev)) {
2711 tg3_readphy(tp, MII_BMSR, &bmsr);
2712 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2713 !(bmsr & BMSR_LSTATUS))
2714 force_reset = 1;
2715 }
2716 if (force_reset)
2717 tg3_phy_reset(tp);
2718
2719 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2720 tg3_readphy(tp, MII_BMSR, &bmsr);
2721 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2722 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2723 bmsr = 0;
2724
2725 if (!(bmsr & BMSR_LSTATUS)) {
2726 err = tg3_init_5401phy_dsp(tp);
2727 if (err)
2728 return err;
2729
2730 tg3_readphy(tp, MII_BMSR, &bmsr);
2731 for (i = 0; i < 1000; i++) {
2732 udelay(10);
2733 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2734 (bmsr & BMSR_LSTATUS)) {
2735 udelay(40);
2736 break;
2737 }
2738 }
2739
2740 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2741 !(bmsr & BMSR_LSTATUS) &&
2742 tp->link_config.active_speed == SPEED_1000) {
2743 err = tg3_phy_reset(tp);
2744 if (!err)
2745 err = tg3_init_5401phy_dsp(tp);
2746 if (err)
2747 return err;
2748 }
2749 }
2750 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2751 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2752 /* 5701 {A0,B0} CRC bug workaround */
2753 tg3_writephy(tp, 0x15, 0x0a75);
2754 tg3_writephy(tp, 0x1c, 0x8c68);
2755 tg3_writephy(tp, 0x1c, 0x8d68);
2756 tg3_writephy(tp, 0x1c, 0x8c68);
2757 }
2758
2759 /* Clear pending interrupts... */
2760 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2761 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2762
2763 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2764 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002765 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2767
2768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2770 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2771 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2772 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2773 else
2774 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2775 }
2776
2777 current_link_up = 0;
2778 current_speed = SPEED_INVALID;
2779 current_duplex = DUPLEX_INVALID;
2780
2781 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2782 u32 val;
2783
2784 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2785 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2786 if (!(val & (1 << 10))) {
2787 val |= (1 << 10);
2788 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2789 goto relink;
2790 }
2791 }
2792
2793 bmsr = 0;
2794 for (i = 0; i < 100; i++) {
2795 tg3_readphy(tp, MII_BMSR, &bmsr);
2796 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2797 (bmsr & BMSR_LSTATUS))
2798 break;
2799 udelay(40);
2800 }
2801
2802 if (bmsr & BMSR_LSTATUS) {
2803 u32 aux_stat, bmcr;
2804
2805 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2806 for (i = 0; i < 2000; i++) {
2807 udelay(10);
2808 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2809 aux_stat)
2810 break;
2811 }
2812
2813 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2814 &current_speed,
2815 &current_duplex);
2816
2817 bmcr = 0;
2818 for (i = 0; i < 200; i++) {
2819 tg3_readphy(tp, MII_BMCR, &bmcr);
2820 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2821 continue;
2822 if (bmcr && bmcr != 0x7fff)
2823 break;
2824 udelay(10);
2825 }
2826
Matt Carlsonef167e22007-12-20 20:10:01 -08002827 lcl_adv = 0;
2828 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829
Matt Carlsonef167e22007-12-20 20:10:01 -08002830 tp->link_config.active_speed = current_speed;
2831 tp->link_config.active_duplex = current_duplex;
2832
2833 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2834 if ((bmcr & BMCR_ANENABLE) &&
2835 tg3_copper_is_advertising_all(tp,
2836 tp->link_config.advertising)) {
2837 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2838 &rmt_adv))
2839 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 }
2841 } else {
2842 if (!(bmcr & BMCR_ANENABLE) &&
2843 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002844 tp->link_config.duplex == current_duplex &&
2845 tp->link_config.flowctrl ==
2846 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 }
2849 }
2850
Matt Carlsonef167e22007-12-20 20:10:01 -08002851 if (current_link_up == 1 &&
2852 tp->link_config.active_duplex == DUPLEX_FULL)
2853 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 }
2855
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856relink:
Michael Chan6921d202005-12-13 21:15:53 -08002857 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 u32 tmp;
2859
2860 tg3_phy_copper_begin(tp);
2861
2862 tg3_readphy(tp, MII_BMSR, &tmp);
2863 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2864 (tmp & BMSR_LSTATUS))
2865 current_link_up = 1;
2866 }
2867
2868 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2869 if (current_link_up == 1) {
2870 if (tp->link_config.active_speed == SPEED_100 ||
2871 tp->link_config.active_speed == SPEED_10)
2872 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2873 else
2874 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2875 } else
2876 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2877
2878 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2879 if (tp->link_config.active_duplex == DUPLEX_HALF)
2880 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2881
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002883 if (current_link_up == 1 &&
2884 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002886 else
2887 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 }
2889
2890 /* ??? Without this setting Netgear GA302T PHY does not
2891 * ??? send/receive packets...
2892 */
2893 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2894 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2895 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2896 tw32_f(MAC_MI_MODE, tp->mi_mode);
2897 udelay(80);
2898 }
2899
2900 tw32_f(MAC_MODE, tp->mac_mode);
2901 udelay(40);
2902
2903 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2904 /* Polled via timer. */
2905 tw32_f(MAC_EVENT, 0);
2906 } else {
2907 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2908 }
2909 udelay(40);
2910
2911 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2912 current_link_up == 1 &&
2913 tp->link_config.active_speed == SPEED_1000 &&
2914 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2915 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2916 udelay(120);
2917 tw32_f(MAC_STATUS,
2918 (MAC_STATUS_SYNC_CHANGED |
2919 MAC_STATUS_CFG_CHANGED));
2920 udelay(40);
2921 tg3_write_mem(tp,
2922 NIC_SRAM_FIRMWARE_MBOX,
2923 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2924 }
2925
2926 if (current_link_up != netif_carrier_ok(tp->dev)) {
2927 if (current_link_up)
2928 netif_carrier_on(tp->dev);
2929 else
2930 netif_carrier_off(tp->dev);
2931 tg3_link_report(tp);
2932 }
2933
2934 return 0;
2935}
2936
2937struct tg3_fiber_aneginfo {
2938 int state;
2939#define ANEG_STATE_UNKNOWN 0
2940#define ANEG_STATE_AN_ENABLE 1
2941#define ANEG_STATE_RESTART_INIT 2
2942#define ANEG_STATE_RESTART 3
2943#define ANEG_STATE_DISABLE_LINK_OK 4
2944#define ANEG_STATE_ABILITY_DETECT_INIT 5
2945#define ANEG_STATE_ABILITY_DETECT 6
2946#define ANEG_STATE_ACK_DETECT_INIT 7
2947#define ANEG_STATE_ACK_DETECT 8
2948#define ANEG_STATE_COMPLETE_ACK_INIT 9
2949#define ANEG_STATE_COMPLETE_ACK 10
2950#define ANEG_STATE_IDLE_DETECT_INIT 11
2951#define ANEG_STATE_IDLE_DETECT 12
2952#define ANEG_STATE_LINK_OK 13
2953#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2954#define ANEG_STATE_NEXT_PAGE_WAIT 15
2955
2956 u32 flags;
2957#define MR_AN_ENABLE 0x00000001
2958#define MR_RESTART_AN 0x00000002
2959#define MR_AN_COMPLETE 0x00000004
2960#define MR_PAGE_RX 0x00000008
2961#define MR_NP_LOADED 0x00000010
2962#define MR_TOGGLE_TX 0x00000020
2963#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2964#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2965#define MR_LP_ADV_SYM_PAUSE 0x00000100
2966#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2967#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2968#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2969#define MR_LP_ADV_NEXT_PAGE 0x00001000
2970#define MR_TOGGLE_RX 0x00002000
2971#define MR_NP_RX 0x00004000
2972
2973#define MR_LINK_OK 0x80000000
2974
2975 unsigned long link_time, cur_time;
2976
2977 u32 ability_match_cfg;
2978 int ability_match_count;
2979
2980 char ability_match, idle_match, ack_match;
2981
2982 u32 txconfig, rxconfig;
2983#define ANEG_CFG_NP 0x00000080
2984#define ANEG_CFG_ACK 0x00000040
2985#define ANEG_CFG_RF2 0x00000020
2986#define ANEG_CFG_RF1 0x00000010
2987#define ANEG_CFG_PS2 0x00000001
2988#define ANEG_CFG_PS1 0x00008000
2989#define ANEG_CFG_HD 0x00004000
2990#define ANEG_CFG_FD 0x00002000
2991#define ANEG_CFG_INVAL 0x00001f06
2992
2993};
2994#define ANEG_OK 0
2995#define ANEG_DONE 1
2996#define ANEG_TIMER_ENAB 2
2997#define ANEG_FAILED -1
2998
2999#define ANEG_STATE_SETTLE_TIME 10000
3000
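/* Advance the software fiber autonegotiation state machine one step,
 * decoding the link partner's config word from MAC_RX_AUTO_NEG and
 * driving ours through MAC_TX_AUTO_NEG.
 */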
3001static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3002 struct tg3_fiber_aneginfo *ap)
3003{
Matt Carlson5be73b42007-12-20 20:09:29 -08003004 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 unsigned long delta;
3006 u32 rx_cfg_reg;
3007 int ret;
3008
3009 if (ap->state == ANEG_STATE_UNKNOWN) {
3010 ap->rxconfig = 0;
3011 ap->link_time = 0;
3012 ap->cur_time = 0;
3013 ap->ability_match_cfg = 0;
3014 ap->ability_match_count = 0;
3015 ap->ability_match = 0;
3016 ap->idle_match = 0;
3017 ap->ack_match = 0;
3018 }
3019 ap->cur_time++;
3020
3021 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3022 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3023
3024 if (rx_cfg_reg != ap->ability_match_cfg) {
3025 ap->ability_match_cfg = rx_cfg_reg;
3026 ap->ability_match = 0;
3027 ap->ability_match_count = 0;
3028 } else {
3029 if (++ap->ability_match_count > 1) {
3030 ap->ability_match = 1;
3031 ap->ability_match_cfg = rx_cfg_reg;
3032 }
3033 }
3034 if (rx_cfg_reg & ANEG_CFG_ACK)
3035 ap->ack_match = 1;
3036 else
3037 ap->ack_match = 0;
3038
3039 ap->idle_match = 0;
3040 } else {
3041 ap->idle_match = 1;
3042 ap->ability_match_cfg = 0;
3043 ap->ability_match_count = 0;
3044 ap->ability_match = 0;
3045 ap->ack_match = 0;
3046
3047 rx_cfg_reg = 0;
3048 }
3049
3050 ap->rxconfig = rx_cfg_reg;
3051 ret = ANEG_OK;
3052
3053	switch (ap->state) {
3054 case ANEG_STATE_UNKNOWN:
3055 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3056 ap->state = ANEG_STATE_AN_ENABLE;
3057
3058 /* fallthru */
3059 case ANEG_STATE_AN_ENABLE:
3060 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3061 if (ap->flags & MR_AN_ENABLE) {
3062 ap->link_time = 0;
3063 ap->cur_time = 0;
3064 ap->ability_match_cfg = 0;
3065 ap->ability_match_count = 0;
3066 ap->ability_match = 0;
3067 ap->idle_match = 0;
3068 ap->ack_match = 0;
3069
3070 ap->state = ANEG_STATE_RESTART_INIT;
3071 } else {
3072 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3073 }
3074 break;
3075
3076 case ANEG_STATE_RESTART_INIT:
3077 ap->link_time = ap->cur_time;
3078 ap->flags &= ~(MR_NP_LOADED);
3079 ap->txconfig = 0;
3080 tw32(MAC_TX_AUTO_NEG, 0);
3081 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3082 tw32_f(MAC_MODE, tp->mac_mode);
3083 udelay(40);
3084
3085 ret = ANEG_TIMER_ENAB;
3086 ap->state = ANEG_STATE_RESTART;
3087
3088 /* fallthru */
3089 case ANEG_STATE_RESTART:
3090 delta = ap->cur_time - ap->link_time;
3091 if (delta > ANEG_STATE_SETTLE_TIME) {
3092 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3093 } else {
3094 ret = ANEG_TIMER_ENAB;
3095 }
3096 break;
3097
3098 case ANEG_STATE_DISABLE_LINK_OK:
3099 ret = ANEG_DONE;
3100 break;
3101
3102 case ANEG_STATE_ABILITY_DETECT_INIT:
3103 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003104 ap->txconfig = ANEG_CFG_FD;
3105 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3106 if (flowctrl & ADVERTISE_1000XPAUSE)
3107 ap->txconfig |= ANEG_CFG_PS1;
3108 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3109 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3111 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3112 tw32_f(MAC_MODE, tp->mac_mode);
3113 udelay(40);
3114
3115 ap->state = ANEG_STATE_ABILITY_DETECT;
3116 break;
3117
3118 case ANEG_STATE_ABILITY_DETECT:
3119 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3120 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3121 }
3122 break;
3123
3124 case ANEG_STATE_ACK_DETECT_INIT:
3125 ap->txconfig |= ANEG_CFG_ACK;
3126 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3127 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3128 tw32_f(MAC_MODE, tp->mac_mode);
3129 udelay(40);
3130
3131 ap->state = ANEG_STATE_ACK_DETECT;
3132
3133 /* fallthru */
3134 case ANEG_STATE_ACK_DETECT:
3135 if (ap->ack_match != 0) {
3136 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3137 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3138 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3139 } else {
3140 ap->state = ANEG_STATE_AN_ENABLE;
3141 }
3142 } else if (ap->ability_match != 0 &&
3143 ap->rxconfig == 0) {
3144 ap->state = ANEG_STATE_AN_ENABLE;
3145 }
3146 break;
3147
3148 case ANEG_STATE_COMPLETE_ACK_INIT:
3149 if (ap->rxconfig & ANEG_CFG_INVAL) {
3150 ret = ANEG_FAILED;
3151 break;
3152 }
3153 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3154 MR_LP_ADV_HALF_DUPLEX |
3155 MR_LP_ADV_SYM_PAUSE |
3156 MR_LP_ADV_ASYM_PAUSE |
3157 MR_LP_ADV_REMOTE_FAULT1 |
3158 MR_LP_ADV_REMOTE_FAULT2 |
3159 MR_LP_ADV_NEXT_PAGE |
3160 MR_TOGGLE_RX |
3161 MR_NP_RX);
3162 if (ap->rxconfig & ANEG_CFG_FD)
3163 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3164 if (ap->rxconfig & ANEG_CFG_HD)
3165 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3166 if (ap->rxconfig & ANEG_CFG_PS1)
3167 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3168 if (ap->rxconfig & ANEG_CFG_PS2)
3169 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3170 if (ap->rxconfig & ANEG_CFG_RF1)
3171 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3172 if (ap->rxconfig & ANEG_CFG_RF2)
3173 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3174 if (ap->rxconfig & ANEG_CFG_NP)
3175 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3176
3177 ap->link_time = ap->cur_time;
3178
3179 ap->flags ^= (MR_TOGGLE_TX);
3180 if (ap->rxconfig & 0x0008)
3181 ap->flags |= MR_TOGGLE_RX;
3182 if (ap->rxconfig & ANEG_CFG_NP)
3183 ap->flags |= MR_NP_RX;
3184 ap->flags |= MR_PAGE_RX;
3185
3186 ap->state = ANEG_STATE_COMPLETE_ACK;
3187 ret = ANEG_TIMER_ENAB;
3188 break;
3189
3190 case ANEG_STATE_COMPLETE_ACK:
3191 if (ap->ability_match != 0 &&
3192 ap->rxconfig == 0) {
3193 ap->state = ANEG_STATE_AN_ENABLE;
3194 break;
3195 }
3196 delta = ap->cur_time - ap->link_time;
3197 if (delta > ANEG_STATE_SETTLE_TIME) {
3198 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3199 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3200 } else {
3201 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3202 !(ap->flags & MR_NP_RX)) {
3203 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3204 } else {
3205 ret = ANEG_FAILED;
3206 }
3207 }
3208 }
3209 break;
3210
3211 case ANEG_STATE_IDLE_DETECT_INIT:
3212 ap->link_time = ap->cur_time;
3213 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3214 tw32_f(MAC_MODE, tp->mac_mode);
3215 udelay(40);
3216
3217 ap->state = ANEG_STATE_IDLE_DETECT;
3218 ret = ANEG_TIMER_ENAB;
3219 break;
3220
3221 case ANEG_STATE_IDLE_DETECT:
3222 if (ap->ability_match != 0 &&
3223 ap->rxconfig == 0) {
3224 ap->state = ANEG_STATE_AN_ENABLE;
3225 break;
3226 }
3227 delta = ap->cur_time - ap->link_time;
3228 if (delta > ANEG_STATE_SETTLE_TIME) {
3229 /* XXX another gem from the Broadcom driver :( */
3230 ap->state = ANEG_STATE_LINK_OK;
3231 }
3232 break;
3233
3234 case ANEG_STATE_LINK_OK:
3235 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3236 ret = ANEG_DONE;
3237 break;
3238
3239 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3240 /* ??? unimplemented */
3241 break;
3242
3243 case ANEG_STATE_NEXT_PAGE_WAIT:
3244 /* ??? unimplemented */
3245 break;
3246
3247 default:
3248 ret = ANEG_FAILED;
3249 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003250 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251
3252 return ret;
3253}
3254
Matt Carlson5be73b42007-12-20 20:09:29 -08003255static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256{
3257 int res = 0;
3258 struct tg3_fiber_aneginfo aninfo;
3259 int status = ANEG_FAILED;
3260 unsigned int tick;
3261 u32 tmp;
3262
3263 tw32_f(MAC_TX_AUTO_NEG, 0);
3264
3265 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3266 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3267 udelay(40);
3268
3269 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3270 udelay(40);
3271
3272 memset(&aninfo, 0, sizeof(aninfo));
3273 aninfo.flags |= MR_AN_ENABLE;
3274 aninfo.state = ANEG_STATE_UNKNOWN;
3275 aninfo.cur_time = 0;
3276 tick = 0;
3277 while (++tick < 195000) {
3278 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3279 if (status == ANEG_DONE || status == ANEG_FAILED)
3280 break;
3281
3282 udelay(1);
3283 }
3284
3285 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3286 tw32_f(MAC_MODE, tp->mac_mode);
3287 udelay(40);
3288
Matt Carlson5be73b42007-12-20 20:09:29 -08003289 *txflags = aninfo.txconfig;
3290 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291
3292 if (status == ANEG_DONE &&
3293 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3294 MR_LP_ADV_FULL_DUPLEX)))
3295 res = 1;
3296
3297 return res;
3298}
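/* Rough timing for the polling loop above (illustrative figures only): each
 * tick costs at least the udelay(1), so the 195000-iteration bound gives a
 * single autoneg attempt a budget on the order of 195 ms, and the
 * ANEG_STATE_SETTLE_TIME of 10000 ticks used inside the state machine
 * corresponds to settle windows on the order of 10 ms.
 */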
3299
3300static void tg3_init_bcm8002(struct tg3 *tp)
3301{
3302 u32 mac_status = tr32(MAC_STATUS);
3303 int i;
3304
 3305	/* Reset when initializing for the first time or when we have a link. */
3306 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3307 !(mac_status & MAC_STATUS_PCS_SYNCED))
3308 return;
3309
3310 /* Set PLL lock range. */
3311 tg3_writephy(tp, 0x16, 0x8007);
3312
3313 /* SW reset */
3314 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3315
3316 /* Wait for reset to complete. */
3317 /* XXX schedule_timeout() ... */
3318 for (i = 0; i < 500; i++)
3319 udelay(10);
3320
3321 /* Config mode; select PMA/Ch 1 regs. */
3322 tg3_writephy(tp, 0x10, 0x8411);
3323
3324 /* Enable auto-lock and comdet, select txclk for tx. */
3325 tg3_writephy(tp, 0x11, 0x0a10);
3326
3327 tg3_writephy(tp, 0x18, 0x00a0);
3328 tg3_writephy(tp, 0x16, 0x41ff);
3329
3330 /* Assert and deassert POR. */
3331 tg3_writephy(tp, 0x13, 0x0400);
3332 udelay(40);
3333 tg3_writephy(tp, 0x13, 0x0000);
3334
3335 tg3_writephy(tp, 0x11, 0x0a50);
3336 udelay(40);
3337 tg3_writephy(tp, 0x11, 0x0a10);
3338
3339 /* Wait for signal to stabilize */
3340 /* XXX schedule_timeout() ... */
3341 for (i = 0; i < 15000; i++)
3342 udelay(10);
3343
3344 /* Deselect the channel register so we can read the PHYID
3345 * later.
3346 */
3347 tg3_writephy(tp, 0x10, 0x8011);
3348}
3349
3350static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3351{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003352 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 u32 sg_dig_ctrl, sg_dig_status;
3354 u32 serdes_cfg, expected_sg_dig_ctrl;
3355 int workaround, port_a;
3356 int current_link_up;
3357
3358 serdes_cfg = 0;
3359 expected_sg_dig_ctrl = 0;
3360 workaround = 0;
3361 port_a = 1;
3362 current_link_up = 0;
3363
3364 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3365 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3366 workaround = 1;
3367 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3368 port_a = 0;
3369
3370 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3371 /* preserve bits 20-23 for voltage regulator */
3372 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3373 }
3374
3375 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3376
3377 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003378 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 if (workaround) {
3380 u32 val = serdes_cfg;
3381
3382 if (port_a)
3383 val |= 0xc010000;
3384 else
3385 val |= 0x4010000;
3386 tw32_f(MAC_SERDES_CFG, val);
3387 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003388
3389 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390 }
3391 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3392 tg3_setup_flow_control(tp, 0, 0);
3393 current_link_up = 1;
3394 }
3395 goto out;
3396 }
3397
3398 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003399 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400
Matt Carlson82cd3d12007-12-20 20:09:00 -08003401 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3402 if (flowctrl & ADVERTISE_1000XPAUSE)
3403 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3404 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3405 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406
3407 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003408 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3409 tp->serdes_counter &&
3410 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3411 MAC_STATUS_RCVD_CFG)) ==
3412 MAC_STATUS_PCS_SYNCED)) {
3413 tp->serdes_counter--;
3414 current_link_up = 1;
3415 goto out;
3416 }
3417restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 if (workaround)
3419 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003420 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 udelay(5);
3422 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3423
Michael Chan3d3ebe72006-09-27 15:59:15 -07003424 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3425 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3427 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003428 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 mac_status = tr32(MAC_STATUS);
3430
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003431 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003433 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434
Matt Carlson82cd3d12007-12-20 20:09:00 -08003435 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3436 local_adv |= ADVERTISE_1000XPAUSE;
3437 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3438 local_adv |= ADVERTISE_1000XPSE_ASYM;
3439
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003440 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003441 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003442 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003443 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444
3445 tg3_setup_flow_control(tp, local_adv, remote_adv);
3446 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003447 tp->serdes_counter = 0;
3448 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003449 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003450 if (tp->serdes_counter)
3451 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 else {
3453 if (workaround) {
3454 u32 val = serdes_cfg;
3455
3456 if (port_a)
3457 val |= 0xc010000;
3458 else
3459 val |= 0x4010000;
3460
3461 tw32_f(MAC_SERDES_CFG, val);
3462 }
3463
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003464 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 udelay(40);
3466
 3467				/* Link parallel detection - link is up only
 3468				 * if we have PCS_SYNC and are not
 3469				 * receiving config code words. */
3470 mac_status = tr32(MAC_STATUS);
3471 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3472 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3473 tg3_setup_flow_control(tp, 0, 0);
3474 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003475 tp->tg3_flags2 |=
3476 TG3_FLG2_PARALLEL_DETECT;
3477 tp->serdes_counter =
3478 SERDES_PARALLEL_DET_TIMEOUT;
3479 } else
3480 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481 }
3482 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003483 } else {
3484 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3485 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 }
3487
3488out:
3489 return current_link_up;
3490}
3491
3492static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3493{
3494 int current_link_up = 0;
3495
Michael Chan5cf64b82007-05-05 12:11:21 -07003496 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498
3499 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003500 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003502
Matt Carlson5be73b42007-12-20 20:09:29 -08003503 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3504 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505
Matt Carlson5be73b42007-12-20 20:09:29 -08003506 if (txflags & ANEG_CFG_PS1)
3507 local_adv |= ADVERTISE_1000XPAUSE;
3508 if (txflags & ANEG_CFG_PS2)
3509 local_adv |= ADVERTISE_1000XPSE_ASYM;
3510
3511 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3512 remote_adv |= LPA_1000XPAUSE;
3513 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3514 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515
3516 tg3_setup_flow_control(tp, local_adv, remote_adv);
3517
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518 current_link_up = 1;
3519 }
3520 for (i = 0; i < 30; i++) {
3521 udelay(20);
3522 tw32_f(MAC_STATUS,
3523 (MAC_STATUS_SYNC_CHANGED |
3524 MAC_STATUS_CFG_CHANGED));
3525 udelay(40);
3526 if ((tr32(MAC_STATUS) &
3527 (MAC_STATUS_SYNC_CHANGED |
3528 MAC_STATUS_CFG_CHANGED)) == 0)
3529 break;
3530 }
3531
3532 mac_status = tr32(MAC_STATUS);
3533 if (current_link_up == 0 &&
3534 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3535 !(mac_status & MAC_STATUS_RCVD_CFG))
3536 current_link_up = 1;
3537 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003538 tg3_setup_flow_control(tp, 0, 0);
3539
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 /* Forcing 1000FD link up. */
3541 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542
3543 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3544 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003545
3546 tw32_f(MAC_MODE, tp->mac_mode);
3547 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 }
3549
3550out:
3551 return current_link_up;
3552}
3553
3554static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3555{
3556 u32 orig_pause_cfg;
3557 u16 orig_active_speed;
3558 u8 orig_active_duplex;
3559 u32 mac_status;
3560 int current_link_up;
3561 int i;
3562
Matt Carlson8d018622007-12-20 20:05:44 -08003563 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 orig_active_speed = tp->link_config.active_speed;
3565 orig_active_duplex = tp->link_config.active_duplex;
3566
3567 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3568 netif_carrier_ok(tp->dev) &&
3569 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3570 mac_status = tr32(MAC_STATUS);
3571 mac_status &= (MAC_STATUS_PCS_SYNCED |
3572 MAC_STATUS_SIGNAL_DET |
3573 MAC_STATUS_CFG_CHANGED |
3574 MAC_STATUS_RCVD_CFG);
3575 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3576 MAC_STATUS_SIGNAL_DET)) {
3577 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3578 MAC_STATUS_CFG_CHANGED));
3579 return 0;
3580 }
3581 }
3582
3583 tw32_f(MAC_TX_AUTO_NEG, 0);
3584
3585 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3586 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3587 tw32_f(MAC_MODE, tp->mac_mode);
3588 udelay(40);
3589
3590 if (tp->phy_id == PHY_ID_BCM8002)
3591 tg3_init_bcm8002(tp);
3592
3593 /* Enable link change event even when serdes polling. */
3594 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3595 udelay(40);
3596
3597 current_link_up = 0;
3598 mac_status = tr32(MAC_STATUS);
3599
3600 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3601 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3602 else
3603 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3604
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 tp->hw_status->status =
3606 (SD_STATUS_UPDATED |
3607 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3608
3609 for (i = 0; i < 100; i++) {
3610 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3611 MAC_STATUS_CFG_CHANGED));
3612 udelay(5);
3613 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003614 MAC_STATUS_CFG_CHANGED |
3615 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 break;
3617 }
3618
3619 mac_status = tr32(MAC_STATUS);
3620 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3621 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003622 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3623 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 tw32_f(MAC_MODE, (tp->mac_mode |
3625 MAC_MODE_SEND_CONFIGS));
3626 udelay(1);
3627 tw32_f(MAC_MODE, tp->mac_mode);
3628 }
3629 }
3630
3631 if (current_link_up == 1) {
3632 tp->link_config.active_speed = SPEED_1000;
3633 tp->link_config.active_duplex = DUPLEX_FULL;
3634 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3635 LED_CTRL_LNKLED_OVERRIDE |
3636 LED_CTRL_1000MBPS_ON));
3637 } else {
3638 tp->link_config.active_speed = SPEED_INVALID;
3639 tp->link_config.active_duplex = DUPLEX_INVALID;
3640 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3641 LED_CTRL_LNKLED_OVERRIDE |
3642 LED_CTRL_TRAFFIC_OVERRIDE));
3643 }
3644
3645 if (current_link_up != netif_carrier_ok(tp->dev)) {
3646 if (current_link_up)
3647 netif_carrier_on(tp->dev);
3648 else
3649 netif_carrier_off(tp->dev);
3650 tg3_link_report(tp);
3651 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003652 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 if (orig_pause_cfg != now_pause_cfg ||
3654 orig_active_speed != tp->link_config.active_speed ||
3655 orig_active_duplex != tp->link_config.active_duplex)
3656 tg3_link_report(tp);
3657 }
3658
3659 return 0;
3660}
3661
Michael Chan747e8f82005-07-25 12:33:22 -07003662static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3663{
3664 int current_link_up, err = 0;
3665 u32 bmsr, bmcr;
3666 u16 current_speed;
3667 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003668 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003669
3670 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3671 tw32_f(MAC_MODE, tp->mac_mode);
3672 udelay(40);
3673
3674 tw32(MAC_EVENT, 0);
3675
3676 tw32_f(MAC_STATUS,
3677 (MAC_STATUS_SYNC_CHANGED |
3678 MAC_STATUS_CFG_CHANGED |
3679 MAC_STATUS_MI_COMPLETION |
3680 MAC_STATUS_LNKSTATE_CHANGED));
3681 udelay(40);
3682
3683 if (force_reset)
3684 tg3_phy_reset(tp);
3685
3686 current_link_up = 0;
3687 current_speed = SPEED_INVALID;
3688 current_duplex = DUPLEX_INVALID;
3689
3690 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3691 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003692 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3693 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3694 bmsr |= BMSR_LSTATUS;
3695 else
3696 bmsr &= ~BMSR_LSTATUS;
3697 }
Michael Chan747e8f82005-07-25 12:33:22 -07003698
3699 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3700
3701 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003702 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003703 /* do nothing, just check for link up at the end */
3704 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3705 u32 adv, new_adv;
3706
3707 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3708 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3709 ADVERTISE_1000XPAUSE |
3710 ADVERTISE_1000XPSE_ASYM |
3711 ADVERTISE_SLCT);
3712
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003713 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003714
3715 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3716 new_adv |= ADVERTISE_1000XHALF;
3717 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3718 new_adv |= ADVERTISE_1000XFULL;
3719
3720 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3721 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3722 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3723 tg3_writephy(tp, MII_BMCR, bmcr);
3724
3725 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003726 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003727 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3728
3729 return err;
3730 }
3731 } else {
3732 u32 new_bmcr;
3733
3734 bmcr &= ~BMCR_SPEED1000;
3735 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3736
3737 if (tp->link_config.duplex == DUPLEX_FULL)
3738 new_bmcr |= BMCR_FULLDPLX;
3739
3740 if (new_bmcr != bmcr) {
3741 /* BMCR_SPEED1000 is a reserved bit that needs
3742 * to be set on write.
3743 */
3744 new_bmcr |= BMCR_SPEED1000;
3745
3746 /* Force a linkdown */
3747 if (netif_carrier_ok(tp->dev)) {
3748 u32 adv;
3749
3750 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3751 adv &= ~(ADVERTISE_1000XFULL |
3752 ADVERTISE_1000XHALF |
3753 ADVERTISE_SLCT);
3754 tg3_writephy(tp, MII_ADVERTISE, adv);
3755 tg3_writephy(tp, MII_BMCR, bmcr |
3756 BMCR_ANRESTART |
3757 BMCR_ANENABLE);
3758 udelay(10);
3759 netif_carrier_off(tp->dev);
3760 }
3761 tg3_writephy(tp, MII_BMCR, new_bmcr);
3762 bmcr = new_bmcr;
3763 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3764 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003765 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3766 ASIC_REV_5714) {
3767 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3768 bmsr |= BMSR_LSTATUS;
3769 else
3770 bmsr &= ~BMSR_LSTATUS;
3771 }
Michael Chan747e8f82005-07-25 12:33:22 -07003772 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3773 }
3774 }
3775
3776 if (bmsr & BMSR_LSTATUS) {
3777 current_speed = SPEED_1000;
3778 current_link_up = 1;
3779 if (bmcr & BMCR_FULLDPLX)
3780 current_duplex = DUPLEX_FULL;
3781 else
3782 current_duplex = DUPLEX_HALF;
3783
Matt Carlsonef167e22007-12-20 20:10:01 -08003784 local_adv = 0;
3785 remote_adv = 0;
3786
Michael Chan747e8f82005-07-25 12:33:22 -07003787 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003788 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003789
3790 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3791 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3792 common = local_adv & remote_adv;
3793 if (common & (ADVERTISE_1000XHALF |
3794 ADVERTISE_1000XFULL)) {
3795 if (common & ADVERTISE_1000XFULL)
3796 current_duplex = DUPLEX_FULL;
3797 else
3798 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003799 }
3800 else
3801 current_link_up = 0;
3802 }
3803 }
3804
Matt Carlsonef167e22007-12-20 20:10:01 -08003805 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3806 tg3_setup_flow_control(tp, local_adv, remote_adv);
3807
Michael Chan747e8f82005-07-25 12:33:22 -07003808 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3809 if (tp->link_config.active_duplex == DUPLEX_HALF)
3810 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3811
3812 tw32_f(MAC_MODE, tp->mac_mode);
3813 udelay(40);
3814
3815 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3816
3817 tp->link_config.active_speed = current_speed;
3818 tp->link_config.active_duplex = current_duplex;
3819
3820 if (current_link_up != netif_carrier_ok(tp->dev)) {
3821 if (current_link_up)
3822 netif_carrier_on(tp->dev);
3823 else {
3824 netif_carrier_off(tp->dev);
3825 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3826 }
3827 tg3_link_report(tp);
3828 }
3829 return err;
3830}
3831
3832static void tg3_serdes_parallel_detect(struct tg3 *tp)
3833{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003834 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003835 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003836 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003837 return;
3838 }
3839 if (!netif_carrier_ok(tp->dev) &&
3840 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3841 u32 bmcr;
3842
3843 tg3_readphy(tp, MII_BMCR, &bmcr);
3844 if (bmcr & BMCR_ANENABLE) {
3845 u32 phy1, phy2;
3846
3847 /* Select shadow register 0x1f */
3848 tg3_writephy(tp, 0x1c, 0x7c00);
3849 tg3_readphy(tp, 0x1c, &phy1);
3850
3851 /* Select expansion interrupt status register */
3852 tg3_writephy(tp, 0x17, 0x0f01);
3853 tg3_readphy(tp, 0x15, &phy2);
3854 tg3_readphy(tp, 0x15, &phy2);
3855
3856 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
 3857			/* We have signal detect and are not receiving
 3858			 * config code words; link is up by parallel
3859 * detection.
3860 */
3861
3862 bmcr &= ~BMCR_ANENABLE;
3863 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3864 tg3_writephy(tp, MII_BMCR, bmcr);
3865 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3866 }
3867 }
3868 }
3869 else if (netif_carrier_ok(tp->dev) &&
3870 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3871 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3872 u32 phy2;
3873
3874 /* Select expansion interrupt status register */
3875 tg3_writephy(tp, 0x17, 0x0f01);
3876 tg3_readphy(tp, 0x15, &phy2);
3877 if (phy2 & 0x20) {
3878 u32 bmcr;
3879
3880 /* Config code words received, turn on autoneg. */
3881 tg3_readphy(tp, MII_BMCR, &bmcr);
3882 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3883
3884 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3885
3886 }
3887 }
3888}
3889
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3891{
3892 int err;
3893
3894 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3895 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003896 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3897 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898 } else {
3899 err = tg3_setup_copper_phy(tp, force_reset);
3900 }
3901
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003902 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003903 u32 val, scale;
3904
3905 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3906 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3907 scale = 65;
3908 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3909 scale = 6;
3910 else
3911 scale = 12;
3912
3913 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3914 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3915 tw32(GRC_MISC_CFG, val);
3916 }
3917
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918 if (tp->link_config.active_speed == SPEED_1000 &&
3919 tp->link_config.active_duplex == DUPLEX_HALF)
3920 tw32(MAC_TX_LENGTHS,
3921 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3922 (6 << TX_LENGTHS_IPG_SHIFT) |
3923 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3924 else
3925 tw32(MAC_TX_LENGTHS,
3926 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3927 (6 << TX_LENGTHS_IPG_SHIFT) |
3928 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3929
3930 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3931 if (netif_carrier_ok(tp->dev)) {
3932 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003933 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 } else {
3935 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3936 }
3937 }
3938
Matt Carlson8ed5d972007-05-07 00:25:49 -07003939 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3940 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3941 if (!netif_carrier_ok(tp->dev))
3942 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3943 tp->pwrmgmt_thresh;
3944 else
3945 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3946 tw32(PCIE_PWR_MGMT_THRESH, val);
3947 }
3948
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 return err;
3950}
3951
Michael Chandf3e6542006-05-26 17:48:07 -07003952/* This is called whenever we suspect that the system chipset is re-
3953 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3954 * is bogus tx completions. We try to recover by setting the
3955 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3956 * in the workqueue.
3957 */
3958static void tg3_tx_recover(struct tg3 *tp)
3959{
3960 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3961 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3962
3963 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3964 "mapped I/O cycles to the network device, attempting to "
3965 "recover. Please report the problem to the driver maintainer "
3966 "and include system chipset information.\n", tp->dev->name);
3967
3968 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003969 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003970 spin_unlock(&tp->lock);
3971}
3972
Michael Chan1b2a7202006-08-07 21:46:02 -07003973static inline u32 tg3_tx_avail(struct tg3 *tp)
3974{
3975 smp_mb();
3976 return (tp->tx_pending -
3977 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3978}
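/* Worked example for the computation above (illustrative values only):
 * because TG3_TX_RING_SIZE is a power of two, (tx_prod - tx_cons) masked
 * with (TG3_TX_RING_SIZE - 1) counts the descriptors still in flight even
 * after the u32 indices wrap.  Assuming a 512-entry ring and
 * tx_pending == 511:
 *
 *	tx_prod = 0x00000003, tx_cons = 0xfffffffe
 *	in flight = (0x00000003 - 0xfffffffe) & 511 = 5
 *	available = 511 - 5 = 506
 */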
3979
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980/* Tigon3 never reports partial packet sends. So we do not
3981 * need special logic to handle SKBs that have not had all
3982 * of their frags sent yet, like SunGEM does.
3983 */
3984static void tg3_tx(struct tg3 *tp)
3985{
3986 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3987 u32 sw_idx = tp->tx_cons;
3988
3989 while (sw_idx != hw_idx) {
3990 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3991 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003992 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
Michael Chandf3e6542006-05-26 17:48:07 -07003994 if (unlikely(skb == NULL)) {
3995 tg3_tx_recover(tp);
3996 return;
3997 }
3998
David S. Miller90079ce2008-09-11 04:52:51 -07003999 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000
4001 ri->skb = NULL;
4002
4003 sw_idx = NEXT_TX(sw_idx);
4004
4005 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07004007 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4008 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 sw_idx = NEXT_TX(sw_idx);
4010 }
4011
David S. Millerf47c11e2005-06-24 20:18:35 -07004012 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07004013
4014 if (unlikely(tx_bug)) {
4015 tg3_tx_recover(tp);
4016 return;
4017 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 }
4019
4020 tp->tx_cons = sw_idx;
4021
Michael Chan1b2a7202006-08-07 21:46:02 -07004022 /* Need to make the tx_cons update visible to tg3_start_xmit()
4023 * before checking for netif_queue_stopped(). Without the
4024 * memory barrier, there is a small possibility that tg3_start_xmit()
4025 * will miss it and cause the queue to be stopped forever.
4026 */
4027 smp_mb();
4028
4029 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004030 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07004031 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004032 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004033 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07004034 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07004035 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004036 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037}
4038
4039/* Returns size of skb allocated or < 0 on error.
4040 *
4041 * We only need to fill in the address because the other members
 4042 * of the RX descriptor are invariant; see tg3_init_rings.
4043 *
 4044 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 4045 * posting buffers we only dirty the first cache line of the RX
 4046 * descriptor (containing the address), whereas for the RX status
 4047 * buffers the cpu only reads the last cache line of the RX descriptor
4048 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4049 */
4050static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4051 int src_idx, u32 dest_idx_unmasked)
4052{
4053 struct tg3_rx_buffer_desc *desc;
4054 struct ring_info *map, *src_map;
4055 struct sk_buff *skb;
4056 dma_addr_t mapping;
4057 int skb_size, dest_idx;
4058
4059 src_map = NULL;
4060 switch (opaque_key) {
4061 case RXD_OPAQUE_RING_STD:
4062 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4063 desc = &tp->rx_std[dest_idx];
4064 map = &tp->rx_std_buffers[dest_idx];
4065 if (src_idx >= 0)
4066 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07004067 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 break;
4069
4070 case RXD_OPAQUE_RING_JUMBO:
4071 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4072 desc = &tp->rx_jumbo[dest_idx];
4073 map = &tp->rx_jumbo_buffers[dest_idx];
4074 if (src_idx >= 0)
4075 src_map = &tp->rx_jumbo_buffers[src_idx];
4076 skb_size = RX_JUMBO_PKT_BUF_SZ;
4077 break;
4078
4079 default:
4080 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004081 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082
4083 /* Do not overwrite any of the map or rp information
4084 * until we are sure we can commit to a new buffer.
4085 *
4086 * Callers depend upon this behavior and assume that
4087 * we leave everything unchanged if we fail.
4088 */
David S. Millera20e9c62006-07-31 22:38:16 -07004089 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 if (skb == NULL)
4091 return -ENOMEM;
4092
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093 skb_reserve(skb, tp->rx_offset);
4094
4095 mapping = pci_map_single(tp->pdev, skb->data,
4096 skb_size - tp->rx_offset,
4097 PCI_DMA_FROMDEVICE);
4098
4099 map->skb = skb;
4100 pci_unmap_addr_set(map, mapping, mapping);
4101
4102 if (src_map != NULL)
4103 src_map->skb = NULL;
4104
4105 desc->addr_hi = ((u64)mapping >> 32);
4106 desc->addr_lo = ((u64)mapping & 0xffffffff);
4107
4108 return skb_size;
4109}
4110
 4111/* We only need to copy the address over because the other
4112 * members of the RX descriptor are invariant. See notes above
4113 * tg3_alloc_rx_skb for full details.
4114 */
4115static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4116 int src_idx, u32 dest_idx_unmasked)
4117{
4118 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4119 struct ring_info *src_map, *dest_map;
4120 int dest_idx;
4121
4122 switch (opaque_key) {
4123 case RXD_OPAQUE_RING_STD:
4124 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4125 dest_desc = &tp->rx_std[dest_idx];
4126 dest_map = &tp->rx_std_buffers[dest_idx];
4127 src_desc = &tp->rx_std[src_idx];
4128 src_map = &tp->rx_std_buffers[src_idx];
4129 break;
4130
4131 case RXD_OPAQUE_RING_JUMBO:
4132 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4133 dest_desc = &tp->rx_jumbo[dest_idx];
4134 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4135 src_desc = &tp->rx_jumbo[src_idx];
4136 src_map = &tp->rx_jumbo_buffers[src_idx];
4137 break;
4138
4139 default:
4140 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004141 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142
4143 dest_map->skb = src_map->skb;
4144 pci_unmap_addr_set(dest_map, mapping,
4145 pci_unmap_addr(src_map, mapping));
4146 dest_desc->addr_hi = src_desc->addr_hi;
4147 dest_desc->addr_lo = src_desc->addr_lo;
4148
4149 src_map->skb = NULL;
4150}
4151
4152#if TG3_VLAN_TAG_USED
4153static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4154{
4155 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4156}
4157#endif
4158
4159/* The RX ring scheme is composed of multiple rings which post fresh
4160 * buffers to the chip, and one special ring the chip uses to report
4161 * status back to the host.
4162 *
4163 * The special ring reports the status of received packets to the
4164 * host. The chip does not write into the original descriptor the
4165 * RX buffer was obtained from. The chip simply takes the original
4166 * descriptor as provided by the host, updates the status and length
4167 * field, then writes this into the next status ring entry.
4168 *
4169 * Each ring the host uses to post buffers to the chip is described
 4170 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4171 * it is first placed into the on-chip ram. When the packet's length
4172 * is known, it walks down the TG3_BDINFO entries to select the ring.
4173 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4174 * which is within the range of the new packet's length is chosen.
4175 *
4176 * The "separate ring for rx status" scheme may sound queer, but it makes
4177 * sense from a cache coherency perspective. If only the host writes
4178 * to the buffer post rings, and only the chip writes to the rx status
4179 * rings, then cache lines never move beyond shared-modified state.
4180 * If both the host and chip were to write into the same ring, cache line
4181 * eviction could occur since both entities want it in an exclusive state.
4182 */
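/* Illustrative summary of the scheme described above:
 *
 *	host					chip
 *	----					----
 *	posts fresh buffers   ----------->	std / jumbo producer rings;
 *						the packet is DMA'd into a
 *						buffer from the ring whose
 *						TG3_BDINFO MAXLEN fits it
 *	consumes completions  <-----------	rx return (status) ring entry
 *	in tg3_rx(), then recycles		carrying length, flags and the
 *	or replaces the buffer			original opaque cookie
 *
 * Only the host writes the producer rings and only the chip writes the
 * return ring, so each cache line has a single writer.
 */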
4183static int tg3_rx(struct tg3 *tp, int budget)
4184{
Michael Chanf92905d2006-06-29 20:14:29 -07004185 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004186 u32 sw_idx = tp->rx_rcb_ptr;
4187 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 int received;
4189
4190 hw_idx = tp->hw_status->idx[0].rx_producer;
4191 /*
4192 * We need to order the read of hw_idx and the read of
4193 * the opaque cookie.
4194 */
4195 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 work_mask = 0;
4197 received = 0;
4198 while (sw_idx != hw_idx && budget > 0) {
4199 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4200 unsigned int len;
4201 struct sk_buff *skb;
4202 dma_addr_t dma_addr;
4203 u32 opaque_key, desc_idx, *post_ptr;
4204
4205 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4206 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4207 if (opaque_key == RXD_OPAQUE_RING_STD) {
4208 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4209 mapping);
4210 skb = tp->rx_std_buffers[desc_idx].skb;
4211 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004212 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4214 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4215 mapping);
4216 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4217 post_ptr = &tp->rx_jumbo_ptr;
4218 }
4219 else {
4220 goto next_pkt_nopost;
4221 }
4222
4223 work_mask |= opaque_key;
4224
4225 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4226 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4227 drop_it:
4228 tg3_recycle_rx(tp, opaque_key,
4229 desc_idx, *post_ptr);
4230 drop_it_no_recycle:
4231 /* Other statistics kept track of by card. */
4232 tp->net_stats.rx_dropped++;
4233 goto next_pkt;
4234 }
4235
Matt Carlsonad829262008-11-21 17:16:16 -08004236 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4237 ETH_FCS_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004239 if (len > RX_COPY_THRESHOLD
Matt Carlsonad829262008-11-21 17:16:16 -08004240 && tp->rx_offset == NET_IP_ALIGN
4241 /* rx_offset will likely not equal NET_IP_ALIGN
4242 * if this is a 5701 card running in PCI-X mode
4243 * [see tg3_get_invariants()]
4244 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 ) {
4246 int skb_size;
4247
4248 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4249 desc_idx, *post_ptr);
4250 if (skb_size < 0)
4251 goto drop_it;
4252
4253 pci_unmap_single(tp->pdev, dma_addr,
4254 skb_size - tp->rx_offset,
4255 PCI_DMA_FROMDEVICE);
4256
4257 skb_put(skb, len);
4258 } else {
4259 struct sk_buff *copy_skb;
4260
4261 tg3_recycle_rx(tp, opaque_key,
4262 desc_idx, *post_ptr);
4263
Matt Carlsonad829262008-11-21 17:16:16 -08004264 copy_skb = netdev_alloc_skb(tp->dev,
4265 len + TG3_RAW_IP_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266 if (copy_skb == NULL)
4267 goto drop_it_no_recycle;
4268
Matt Carlsonad829262008-11-21 17:16:16 -08004269 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270 skb_put(copy_skb, len);
4271 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004272 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4274
4275 /* We'll reuse the original ring buffer. */
4276 skb = copy_skb;
4277 }
4278
4279 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4280 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4281 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4282 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4283 skb->ip_summed = CHECKSUM_UNNECESSARY;
4284 else
4285 skb->ip_summed = CHECKSUM_NONE;
4286
4287 skb->protocol = eth_type_trans(skb, tp->dev);
4288#if TG3_VLAN_TAG_USED
4289 if (tp->vlgrp != NULL &&
4290 desc->type_flags & RXD_FLAG_VLAN) {
4291 tg3_vlan_rx(tp, skb,
4292 desc->err_vlan & RXD_VLAN_MASK);
4293 } else
4294#endif
4295 netif_receive_skb(skb);
4296
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 received++;
4298 budget--;
4299
4300next_pkt:
4301 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004302
4303 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4304 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4305
4306 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4307 TG3_64BIT_REG_LOW, idx);
4308 work_mask &= ~RXD_OPAQUE_RING_STD;
4309 rx_std_posted = 0;
4310 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004312 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004313 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004314
4315 /* Refresh hw_idx to see if there is new work */
4316 if (sw_idx == hw_idx) {
4317 hw_idx = tp->hw_status->idx[0].rx_producer;
4318 rmb();
4319 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320 }
4321
4322 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004323 tp->rx_rcb_ptr = sw_idx;
4324 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325
4326 /* Refill RX ring(s). */
4327 if (work_mask & RXD_OPAQUE_RING_STD) {
4328 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4329 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4330 sw_idx);
4331 }
4332 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4333 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4334 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4335 sw_idx);
4336 }
4337 mmiowb();
4338
4339 return received;
4340}
4341
David S. Miller6f535762007-10-11 18:08:29 -07004342static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345
Linus Torvalds1da177e2005-04-16 15:20:36 -07004346 /* handle link change and other phy events */
4347 if (!(tp->tg3_flags &
4348 (TG3_FLAG_USE_LINKCHG_REG |
4349 TG3_FLAG_POLL_SERDES))) {
4350 if (sblk->status & SD_STATUS_LINK_CHG) {
4351 sblk->status = SD_STATUS_UPDATED |
4352 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004353 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004354 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4355 tw32_f(MAC_STATUS,
4356 (MAC_STATUS_SYNC_CHANGED |
4357 MAC_STATUS_CFG_CHANGED |
4358 MAC_STATUS_MI_COMPLETION |
4359 MAC_STATUS_LNKSTATE_CHANGED));
4360 udelay(40);
4361 } else
4362 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004363 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 }
4365 }
4366
4367 /* run TX completion thread */
4368 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004370 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004371 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372 }
4373
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 /* run RX thread, within the bounds set by NAPI.
4375 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004376 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004378 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004379 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380
David S. Miller6f535762007-10-11 18:08:29 -07004381 return work_done;
4382}
David S. Millerf7383c22005-05-18 22:50:53 -07004383
David S. Miller6f535762007-10-11 18:08:29 -07004384static int tg3_poll(struct napi_struct *napi, int budget)
4385{
4386 struct tg3 *tp = container_of(napi, struct tg3, napi);
4387 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004388 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004389
4390 while (1) {
4391 work_done = tg3_poll_work(tp, work_done, budget);
4392
4393 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4394 goto tx_recovery;
4395
4396 if (unlikely(work_done >= budget))
4397 break;
4398
Michael Chan4fd7ab52007-10-12 01:39:50 -07004399 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4400 /* tp->last_tag is used in tg3_restart_ints() below
4401 * to tell the hw how much work has been processed,
4402 * so we must read it before checking for more work.
4403 */
4404 tp->last_tag = sblk->status_tag;
4405 rmb();
4406 } else
4407 sblk->status &= ~SD_STATUS_UPDATED;
4408
David S. Miller6f535762007-10-11 18:08:29 -07004409 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004410 netif_rx_complete(tp->dev, napi);
4411 tg3_restart_ints(tp);
4412 break;
4413 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 }
4415
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004416 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004417
4418tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004419 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004420 netif_rx_complete(tp->dev, napi);
4421 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004422 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423}
4424
David S. Millerf47c11e2005-06-24 20:18:35 -07004425static void tg3_irq_quiesce(struct tg3 *tp)
4426{
4427 BUG_ON(tp->irq_sync);
4428
4429 tp->irq_sync = 1;
4430 smp_mb();
4431
4432 synchronize_irq(tp->pdev->irq);
4433}
4434
4435static inline int tg3_irq_sync(struct tg3 *tp)
4436{
4437 return tp->irq_sync;
4438}
4439
 4440/* Fully shut down all tg3 driver activity elsewhere in the system.
 4441 * If irq_sync is non-zero, the IRQ handler is quiesced as well.
 4442 * This is usually only necessary when shutting down the
 4443 * device.
4444 */
4445static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4446{
Michael Chan46966542007-07-11 19:47:19 -07004447 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004448 if (irq_sync)
4449 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004450}
4451
4452static inline void tg3_full_unlock(struct tg3 *tp)
4453{
David S. Millerf47c11e2005-06-24 20:18:35 -07004454 spin_unlock_bh(&tp->lock);
4455}
4456
Michael Chanfcfa0a32006-03-20 22:28:41 -08004457/* One-shot MSI handler - the chip automatically disables the interrupt
 4458 * after sending the MSI, so the driver doesn't have to do it.
4459 */
David Howells7d12e782006-10-05 14:55:46 +01004460static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004461{
4462 struct net_device *dev = dev_id;
4463 struct tg3 *tp = netdev_priv(dev);
4464
4465 prefetch(tp->hw_status);
4466 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4467
4468 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004469 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004470
4471 return IRQ_HANDLED;
4472}
4473
Michael Chan88b06bc2005-04-21 17:13:25 -07004474/* MSI ISR - No need to check for interrupt sharing and no need to
4475 * flush status block and interrupt mailbox. PCI ordering rules
4476 * guarantee that MSI will arrive after the status block.
4477 */
David Howells7d12e782006-10-05 14:55:46 +01004478static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004479{
4480 struct net_device *dev = dev_id;
4481 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004482
Michael Chan61487482005-09-05 17:53:19 -07004483 prefetch(tp->hw_status);
4484 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004485 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004486 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004487 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004488	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004489 * NIC to stop sending us irqs, engaging "in-intr-handler"
4490 * event coalescing.
4491 */
4492 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004493 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004494 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004495
Michael Chan88b06bc2005-04-21 17:13:25 -07004496 return IRQ_RETVAL(1);
4497}
4498
David Howells7d12e782006-10-05 14:55:46 +01004499static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500{
4501 struct net_device *dev = dev_id;
4502 struct tg3 *tp = netdev_priv(dev);
4503 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 unsigned int handled = 1;
4505
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 /* In INTx mode, it is possible for the interrupt to arrive at
 4507	 * the CPU before the status block that was posted prior to the interrupt.
4508 * Reading the PCI State register will confirm whether the
4509 * interrupt is ours and will flush the status block.
4510 */
Michael Chand18edcb2007-03-24 20:57:11 -07004511 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4512 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4513 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4514 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004515 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004516 }
Michael Chand18edcb2007-03-24 20:57:11 -07004517 }
4518
4519 /*
4520 * Writing any value to intr-mbox-0 clears PCI INTA# and
4521 * chip-internal interrupt pending events.
 4522	 * Writing non-zero to intr-mbox-0 additionally tells the
4523 * NIC to stop sending us irqs, engaging "in-intr-handler"
4524 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004525 *
4526 * Flush the mailbox to de-assert the IRQ immediately to prevent
4527 * spurious interrupts. The flush impacts performance but
4528 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004529 */
Michael Chanc04cb342007-05-07 00:26:15 -07004530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004531 if (tg3_irq_sync(tp))
4532 goto out;
4533 sblk->status &= ~SD_STATUS_UPDATED;
4534 if (likely(tg3_has_work(tp))) {
4535 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004536 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004537 } else {
4538 /* No work, shared interrupt perhaps? re-enable
4539 * interrupts, and flush that PCI write
4540 */
4541 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4542 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004543 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004544out:
David S. Millerfac9b832005-05-18 22:46:34 -07004545 return IRQ_RETVAL(handled);
4546}
4547
David Howells7d12e782006-10-05 14:55:46 +01004548static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004549{
4550 struct net_device *dev = dev_id;
4551 struct tg3 *tp = netdev_priv(dev);
4552 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004553 unsigned int handled = 1;
4554
David S. Millerfac9b832005-05-18 22:46:34 -07004555 /* In INTx mode, it is possible for the interrupt to arrive at
 4556	 * the CPU before the status block that was posted prior to the interrupt.
4557 * Reading the PCI State register will confirm whether the
4558 * interrupt is ours and will flush the status block.
4559 */
Michael Chand18edcb2007-03-24 20:57:11 -07004560 if (unlikely(sblk->status_tag == tp->last_tag)) {
4561 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4562 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4563 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004564 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565 }
Michael Chand18edcb2007-03-24 20:57:11 -07004566 }
4567
4568 /*
 4569	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 4570	 * chip-internal interrupt pending events.
 4571	 * Writing non-zero to intr-mbox-0 additionally tells the
4572 * NIC to stop sending us irqs, engaging "in-intr-handler"
4573 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004574 *
4575 * Flush the mailbox to de-assert the IRQ immediately to prevent
4576 * spurious interrupts. The flush impacts performance but
4577 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004578 */
Michael Chanc04cb342007-05-07 00:26:15 -07004579 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004580 if (tg3_irq_sync(tp))
4581 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004582 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004583 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4584 /* Update last_tag to mark that this status has been
 4585		 * seen. Because the interrupt may be shared, we may be
4586 * racing with tg3_poll(), so only update last_tag
4587 * if tg3_poll() is not scheduled.
4588 */
4589 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004590 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004592out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 return IRQ_RETVAL(handled);
4594}
4595
Michael Chan79381092005-04-21 17:13:59 -07004596/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004597static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004598{
4599 struct net_device *dev = dev_id;
4600 struct tg3 *tp = netdev_priv(dev);
4601 struct tg3_hw_status *sblk = tp->hw_status;
4602
Michael Chanf9804dd2005-09-27 12:13:10 -07004603 if ((sblk->status & SD_STATUS_UPDATED) ||
4604 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004605 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004606 return IRQ_RETVAL(1);
4607 }
4608 return IRQ_RETVAL(0);
4609}
4610
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004611static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004612static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613
Michael Chanb9ec6c12006-07-25 16:37:27 -07004614/* Restart hardware after configuration changes, self-test, etc.
4615 * Invoked with tp->lock held.
4616 */
4617static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004618 __releases(tp->lock)
4619 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004620{
4621 int err;
4622
4623 err = tg3_init_hw(tp, reset_phy);
4624 if (err) {
4625 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4626 "aborting.\n", tp->dev->name);
4627 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4628 tg3_full_unlock(tp);
4629 del_timer_sync(&tp->timer);
4630 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004631 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004632 dev_close(tp->dev);
4633 tg3_full_lock(tp, 0);
4634 }
4635 return err;
4636}
4637
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638#ifdef CONFIG_NET_POLL_CONTROLLER
4639static void tg3_poll_controller(struct net_device *dev)
4640{
Michael Chan88b06bc2005-04-21 17:13:25 -07004641 struct tg3 *tp = netdev_priv(dev);
4642
David Howells7d12e782006-10-05 14:55:46 +01004643 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644}
4645#endif
4646
David Howellsc4028952006-11-22 14:57:56 +00004647static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648{
David Howellsc4028952006-11-22 14:57:56 +00004649 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004650 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 unsigned int restart_timer;
4652
Michael Chan7faa0062006-02-02 17:29:28 -08004653 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004654
4655 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004656 tg3_full_unlock(tp);
4657 return;
4658 }
4659
4660 tg3_full_unlock(tp);
4661
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004662 tg3_phy_stop(tp);
4663
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 tg3_netif_stop(tp);
4665
David S. Millerf47c11e2005-06-24 20:18:35 -07004666 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667
4668 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4669 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4670
Michael Chandf3e6542006-05-26 17:48:07 -07004671 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4672 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4673 tp->write32_rx_mbox = tg3_write_flush_reg32;
4674 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4675 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4676 }
4677
Michael Chan944d9802005-05-29 14:57:48 -07004678 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004679 err = tg3_init_hw(tp, 1);
4680 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004681 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682
4683 tg3_netif_start(tp);
4684
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685 if (restart_timer)
4686 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004687
Michael Chanb9ec6c12006-07-25 16:37:27 -07004688out:
Michael Chan7faa0062006-02-02 17:29:28 -08004689 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004690
4691 if (!err)
4692 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693}
4694
Michael Chanb0408752007-02-13 12:18:30 -08004695static void tg3_dump_short_state(struct tg3 *tp)
4696{
4697 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4698 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4699 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4700 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4701}
4702
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703static void tg3_tx_timeout(struct net_device *dev)
4704{
4705 struct tg3 *tp = netdev_priv(dev);
4706
Michael Chanb0408752007-02-13 12:18:30 -08004707 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004708 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4709 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004710 tg3_dump_short_state(tp);
4711 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712
4713 schedule_work(&tp->reset_task);
4714}
4715
Michael Chanc58ec932005-09-17 00:46:27 -07004716/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4717static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4718{
4719 u32 base = (u32) mapping & 0xffffffff;
4720
4721 return ((base > 0xffffdcc0) &&
4722 (base + len + 8 < base));
4723}
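
/* Worked example for the test above (illustrative values, not taken from
 * the driver): with base = 0xffffff00 and len = 0x400, base + len + 8
 * wraps to 0x00000308, which is less than base, so the buffer straddles
 * a 4GB boundary and the function returns true.  With base = 0x7fffff00
 * the first check fails and no wrap is possible, so it returns false.
 * The constant 0xffffdcc0 sits 9024 bytes below a 4GB boundary and
 * appears to act as a cheap pre-filter: only mappings that start close
 * enough to a boundary for a reasonably sized buffer to cross it are
 * examined further.
 */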
4724
Michael Chan72f2afb2006-03-06 19:28:35 -08004725/* Test for DMA addresses > 40-bit */
4726static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4727 int len)
4728{
4729#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004730 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004731 return (((u64) mapping + len) > DMA_40BIT_MASK);
4732 return 0;
4733#else
4734 return 0;
4735#endif
4736}
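
/* Illustrative example: DMA_40BIT_MASK is 2^40 - 1 (0xff_ffffffff), so a
 * mapping of 0xff_ffffff00 with len = 0x200 sums past the mask and the
 * test returns true.  Note the check is only compiled in when both
 * CONFIG_HIGHMEM and a 64-bit BITS_PER_LONG are set, and only bites when
 * TG3_FLAG_40BIT_DMA_BUG is set; in every other configuration it is a
 * constant 0.
 */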
4737
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4739
Michael Chan72f2afb2006-03-06 19:28:35 -08004740/* Work around 4GB and 40-bit hardware DMA bugs. */
4741static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004742 u32 last_plus_one, u32 *start,
4743 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744{
Matt Carlson41588ba2008-04-19 18:12:33 -07004745 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004746 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004747 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004748 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749
Matt Carlson41588ba2008-04-19 18:12:33 -07004750 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4751 new_skb = skb_copy(skb, GFP_ATOMIC);
4752 else {
4753 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4754
4755 new_skb = skb_copy_expand(skb,
4756 skb_headroom(skb) + more_headroom,
4757 skb_tailroom(skb), GFP_ATOMIC);
4758 }
4759
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004761 ret = -1;
4762 } else {
4763 /* New SKB is guaranteed to be linear. */
4764 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004765 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4766 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4767
Michael Chanc58ec932005-09-17 00:46:27 -07004768 /* Make sure new skb does not cross any 4G boundaries.
4769 * Drop the packet if it does.
4770 */
David S. Miller90079ce2008-09-11 04:52:51 -07004771 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004772 if (!ret)
4773 skb_dma_unmap(&tp->pdev->dev, new_skb,
4774 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004775 ret = -1;
4776 dev_kfree_skb(new_skb);
4777 new_skb = NULL;
4778 } else {
4779 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4780 base_flags, 1 | (mss << 1));
4781 *start = NEXT_TX(entry);
4782 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783 }
4784
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785 /* Now clean up the sw ring entries. */
4786 i = 0;
4787 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004788 if (i == 0) {
4789 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004790 } else {
4791 tp->tx_buffers[entry].skb = NULL;
4792 }
4793 entry = NEXT_TX(entry);
4794 i++;
4795 }
4796
David S. Miller90079ce2008-09-11 04:52:51 -07004797 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798 dev_kfree_skb(skb);
4799
Michael Chanc58ec932005-09-17 00:46:27 -07004800 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801}
4802
4803static void tg3_set_txd(struct tg3 *tp, int entry,
4804 dma_addr_t mapping, int len, u32 flags,
4805 u32 mss_and_is_end)
4806{
4807 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4808 int is_end = (mss_and_is_end & 0x1);
4809 u32 mss = (mss_and_is_end >> 1);
4810 u32 vlan_tag = 0;
4811
4812 if (is_end)
4813 flags |= TXD_FLAG_END;
4814 if (flags & TXD_FLAG_VLAN) {
4815 vlan_tag = flags >> 16;
4816 flags &= 0xffff;
4817 }
4818 vlan_tag |= (mss << TXD_MSS_SHIFT);
4819
4820 txd->addr_hi = ((u64) mapping >> 32);
4821 txd->addr_lo = ((u64) mapping & 0xffffffff);
4822 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4823 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4824}
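
/* Packing convention, with an illustrative value: callers fold the MSS
 * and the "last descriptor" flag into one argument as (mss << 1) | is_end,
 * e.g. an MSS of 1448 on the final fragment is passed as
 * (1448 << 1) | 1 = 2897.  tg3_set_txd() unpacks bit 0 as is_end and the
 * remaining bits as the MSS, which lands in the vlan_tag word via
 * TXD_MSS_SHIFT.
 */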
4825
Michael Chan5a6f3072006-03-20 22:28:05 -08004826/* hard_start_xmit for devices that don't have any bugs and
4827 * support TG3_FLG2_HW_TSO_2 only.
4828 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4830{
4831 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004833 struct skb_shared_info *sp;
4834 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004835
4836 len = skb_headlen(skb);
4837
Michael Chan00b70502006-06-17 21:58:45 -07004838 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004839 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004840 * interrupt. Furthermore, IRQ processing runs lockless so we have
4841 * no IRQ context deadlocks to worry about either. Rejoice!
4842 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004843 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004844 if (!netif_queue_stopped(dev)) {
4845 netif_stop_queue(dev);
4846
4847 /* This is a hard error, log it. */
4848 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4849 "queue awake!\n", dev->name);
4850 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004851 return NETDEV_TX_BUSY;
4852 }
4853
4854 entry = tp->tx_prod;
4855 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004856 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004857 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004858 int tcp_opt_len, ip_tcp_len;
4859
4860 if (skb_header_cloned(skb) &&
4861 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4862 dev_kfree_skb(skb);
4863 goto out_unlock;
4864 }
4865
Michael Chanb0026622006-07-03 19:42:14 -07004866 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4867 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4868 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004869 struct iphdr *iph = ip_hdr(skb);
4870
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004871 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004872 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004873
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004874 iph->check = 0;
4875 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004876 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4877 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004878
4879 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4880 TXD_FLAG_CPU_POST_DMA);
4881
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004882 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004883
Michael Chan5a6f3072006-03-20 22:28:05 -08004884 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004885 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004886 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004887#if TG3_VLAN_TAG_USED
4888 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4889 base_flags |= (TXD_FLAG_VLAN |
4890 (vlan_tx_tag_get(skb) << 16));
4891#endif
4892
David S. Miller90079ce2008-09-11 04:52:51 -07004893 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4894 dev_kfree_skb(skb);
4895 goto out_unlock;
4896 }
4897
4898 sp = skb_shinfo(skb);
4899
4900 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004901
4902 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004903
4904 tg3_set_txd(tp, entry, mapping, len, base_flags,
4905 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4906
4907 entry = NEXT_TX(entry);
4908
4909 /* Now loop through additional data fragments, and queue them. */
4910 if (skb_shinfo(skb)->nr_frags > 0) {
4911 unsigned int i, last;
4912
4913 last = skb_shinfo(skb)->nr_frags - 1;
4914 for (i = 0; i <= last; i++) {
4915 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4916
4917 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004918 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004919 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004920
4921 tg3_set_txd(tp, entry, mapping, len,
4922 base_flags, (i == last) | (mss << 1));
4923
4924 entry = NEXT_TX(entry);
4925 }
4926 }
4927
4928 /* Packets are ready, update Tx producer idx local and on card. */
4929 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4930
4931 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004932 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004933 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004934 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004935 netif_wake_queue(tp->dev);
4936 }
4937
4938out_unlock:
4939 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004940
4941 dev->trans_start = jiffies;
4942
4943 return NETDEV_TX_OK;
4944}
4945
Michael Chan52c0fd82006-06-29 20:15:54 -07004946static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4947
4948/* Use GSO to work around a rare TSO bug that may be triggered when the
4949 * TSO header is greater than 80 bytes.
4950 */
4951static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4952{
4953 struct sk_buff *segs, *nskb;
4954
4955 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004956 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004957 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004958 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4959 return NETDEV_TX_BUSY;
4960
4961 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004962 }
4963
4964 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004965 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004966 goto tg3_tso_bug_end;
4967
4968 do {
4969 nskb = segs;
4970 segs = segs->next;
4971 nskb->next = NULL;
4972 tg3_start_xmit_dma_bug(nskb, tp->dev);
4973 } while (segs);
4974
4975tg3_tso_bug_end:
4976 dev_kfree_skb(skb);
4977
4978 return NETDEV_TX_OK;
4979}
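
/* Fallback flow, for reference: skb_gso_segment() splits the oversized
 * TSO packet into already-segmented skbs, each of which is fed back
 * through tg3_start_xmit_dma_bug() as an ordinary transmit, and the
 * original skb is freed at the end.  The gso_segs * 3 check above reads
 * like a rough worst-case estimate of descriptors per resulting segment
 * rather than an exact bound.
 */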
Michael Chan52c0fd82006-06-29 20:15:54 -07004980
Michael Chan5a6f3072006-03-20 22:28:05 -08004981/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4982 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4983 */
4984static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4985{
4986 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004987 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004988 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004990 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991
4992 len = skb_headlen(skb);
4993
Michael Chan00b70502006-06-17 21:58:45 -07004994 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004995 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004996 * interrupt. Furthermore, IRQ processing runs lockless so we have
4997 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004998 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004999 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08005000 if (!netif_queue_stopped(dev)) {
5001 netif_stop_queue(dev);
5002
5003 /* This is a hard error, log it. */
5004 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5005 "queue awake!\n", dev->name);
5006 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 return NETDEV_TX_BUSY;
5008 }
5009
5010 entry = tp->tx_prod;
5011 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005012 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07005015 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005016 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07005017 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018
5019 if (skb_header_cloned(skb) &&
5020 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5021 dev_kfree_skb(skb);
5022 goto out_unlock;
5023 }
5024
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005025 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03005026 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027
Michael Chan52c0fd82006-06-29 20:15:54 -07005028 hdr_len = ip_tcp_len + tcp_opt_len;
5029 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08005030 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07005031 return (tg3_tso_bug(tp, skb));
5032
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5034 TXD_FLAG_CPU_POST_DMA);
5035
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005036 iph = ip_hdr(skb);
5037 iph->check = 0;
5038 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005040 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005042 } else
5043 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5044 iph->daddr, 0,
5045 IPPROTO_TCP,
5046 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047
5048 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5049 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005050 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051 int tsflags;
5052
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005053 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054 mss |= (tsflags << 11);
5055 }
5056 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005057 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005058 int tsflags;
5059
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005060 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005061 base_flags |= tsflags << 12;
5062 }
5063 }
5064 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065#if TG3_VLAN_TAG_USED
5066 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5067 base_flags |= (TXD_FLAG_VLAN |
5068 (vlan_tx_tag_get(skb) << 16));
5069#endif
5070
David S. Miller90079ce2008-09-11 04:52:51 -07005071 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5072 dev_kfree_skb(skb);
5073 goto out_unlock;
5074 }
5075
5076 sp = skb_shinfo(skb);
5077
5078 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005079
5080 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081
5082 would_hit_hwbug = 0;
5083
Matt Carlson41588ba2008-04-19 18:12:33 -07005084 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5085 would_hit_hwbug = 1;
5086 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005087 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088
5089 tg3_set_txd(tp, entry, mapping, len, base_flags,
5090 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5091
5092 entry = NEXT_TX(entry);
5093
5094 /* Now loop through additional data fragments, and queue them. */
5095 if (skb_shinfo(skb)->nr_frags > 0) {
5096 unsigned int i, last;
5097
5098 last = skb_shinfo(skb)->nr_frags - 1;
5099 for (i = 0; i <= last; i++) {
5100 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5101
5102 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005103 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104
5105 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005106
Michael Chanc58ec932005-09-17 00:46:27 -07005107 if (tg3_4g_overflow_test(mapping, len))
5108 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109
Michael Chan72f2afb2006-03-06 19:28:35 -08005110 if (tg3_40bit_overflow_test(tp, mapping, len))
5111 would_hit_hwbug = 1;
5112
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5114 tg3_set_txd(tp, entry, mapping, len,
5115 base_flags, (i == last)|(mss << 1));
5116 else
5117 tg3_set_txd(tp, entry, mapping, len,
5118 base_flags, (i == last));
5119
5120 entry = NEXT_TX(entry);
5121 }
5122 }
5123
5124 if (would_hit_hwbug) {
5125 u32 last_plus_one = entry;
5126 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127
Michael Chanc58ec932005-09-17 00:46:27 -07005128 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5129 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130
5131 /* If the workaround fails due to memory/mapping
5132 * failure, silently drop this packet.
5133 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005134 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005135 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005136 goto out_unlock;
5137
5138 entry = start;
5139 }
5140
5141 /* Packets are ready, update Tx producer idx local and on card. */
5142 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5143
5144 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005145 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005147 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005148 netif_wake_queue(tp->dev);
5149 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150
5151out_unlock:
5152 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153
5154 dev->trans_start = jiffies;
5155
5156 return NETDEV_TX_OK;
5157}
5158
5159static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5160 int new_mtu)
5161{
5162 dev->mtu = new_mtu;
5163
Michael Chanef7f5ec2005-07-25 12:32:25 -07005164 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005165 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005166 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5167 ethtool_op_set_tso(dev, 0);
5168 }
5169 else
5170 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5171 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005172 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005173 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005174 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005175 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176}
5177
5178static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5179{
5180 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005181 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182
5183 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5184 return -EINVAL;
5185
5186 if (!netif_running(dev)) {
5187 /* We'll just catch it later when the
5188 * device is up'd.
5189 */
5190 tg3_set_mtu(dev, tp, new_mtu);
5191 return 0;
5192 }
5193
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005194 tg3_phy_stop(tp);
5195
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005197
5198 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
Michael Chan944d9802005-05-29 14:57:48 -07005200 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201
5202 tg3_set_mtu(dev, tp, new_mtu);
5203
Michael Chanb9ec6c12006-07-25 16:37:27 -07005204 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205
Michael Chanb9ec6c12006-07-25 16:37:27 -07005206 if (!err)
5207 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208
David S. Millerf47c11e2005-06-24 20:18:35 -07005209 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005211 if (!err)
5212 tg3_phy_start(tp);
5213
Michael Chanb9ec6c12006-07-25 16:37:27 -07005214 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215}
5216
5217/* Free up pending packets in all rx/tx rings.
5218 *
5219 * The chip has been shut down and the driver detached from
5220 * the networking stack, so no interrupts or new tx packets will
5221 * end up in the driver. tp->{tx,}lock is not held and we are not
5222 * in an interrupt context and thus may sleep.
5223 */
5224static void tg3_free_rings(struct tg3 *tp)
5225{
5226 struct ring_info *rxp;
5227 int i;
5228
5229 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5230 rxp = &tp->rx_std_buffers[i];
5231
5232 if (rxp->skb == NULL)
5233 continue;
5234 pci_unmap_single(tp->pdev,
5235 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005236 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237 PCI_DMA_FROMDEVICE);
5238 dev_kfree_skb_any(rxp->skb);
5239 rxp->skb = NULL;
5240 }
5241
5242 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5243 rxp = &tp->rx_jumbo_buffers[i];
5244
5245 if (rxp->skb == NULL)
5246 continue;
5247 pci_unmap_single(tp->pdev,
5248 pci_unmap_addr(rxp, mapping),
5249 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5250 PCI_DMA_FROMDEVICE);
5251 dev_kfree_skb_any(rxp->skb);
5252 rxp->skb = NULL;
5253 }
5254
5255 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5256 struct tx_ring_info *txp;
5257 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258
5259 txp = &tp->tx_buffers[i];
5260 skb = txp->skb;
5261
5262 if (skb == NULL) {
5263 i++;
5264 continue;
5265 }
5266
David S. Miller90079ce2008-09-11 04:52:51 -07005267 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5268
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269 txp->skb = NULL;
5270
David S. Miller90079ce2008-09-11 04:52:51 -07005271 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272
5273 dev_kfree_skb_any(skb);
5274 }
5275}
5276
5277/* Initialize tx/rx rings for packet processing.
5278 *
5279 * The chip has been shut down and the driver detached from
5280 * the networking stack, so no interrupts or new tx packets will
5281 * end up in the driver. tp->{tx,}lock are held and thus
5282 * we may not sleep.
5283 */
Michael Chan32d8c572006-07-25 16:38:29 -07005284static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005285{
5286 u32 i;
5287
5288 /* Free up all the SKBs. */
5289 tg3_free_rings(tp);
5290
5291 /* Zero out all descriptors. */
5292 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5293 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5294 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5295 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5296
Michael Chan7e72aad2005-07-25 12:31:17 -07005297 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005298 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005299 (tp->dev->mtu > ETH_DATA_LEN))
5300 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5301
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 /* Initialize invariants of the rings; we only set this
5303 * stuff once. This works because the card does not
5304 * write into the rx buffer posting rings.
5305 */
5306 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5307 struct tg3_rx_buffer_desc *rxd;
5308
5309 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005310 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311 << RXD_LEN_SHIFT;
5312 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5313 rxd->opaque = (RXD_OPAQUE_RING_STD |
5314 (i << RXD_OPAQUE_INDEX_SHIFT));
5315 }
5316
Michael Chan0f893dc2005-07-25 12:30:38 -07005317 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5319 struct tg3_rx_buffer_desc *rxd;
5320
5321 rxd = &tp->rx_jumbo[i];
5322 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5323 << RXD_LEN_SHIFT;
5324 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5325 RXD_FLAG_JUMBO;
5326 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5327 (i << RXD_OPAQUE_INDEX_SHIFT));
5328 }
5329 }
5330
5331 /* Now allocate fresh SKBs for each rx ring. */
5332 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005333 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5334 printk(KERN_WARNING PFX
5335 "%s: Using a smaller RX standard ring, "
5336 "only %d out of %d buffers were allocated "
5337 "successfully.\n",
5338 tp->dev->name, i, tp->rx_pending);
5339 if (i == 0)
5340 return -ENOMEM;
5341 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344 }
5345
Michael Chan0f893dc2005-07-25 12:30:38 -07005346 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5348 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005349 -1, i) < 0) {
5350 printk(KERN_WARNING PFX
5351 "%s: Using a smaller RX jumbo ring, "
5352 "only %d out of %d buffers were "
5353 "allocated successfully.\n",
5354 tp->dev->name, i, tp->rx_jumbo_pending);
5355 if (i == 0) {
5356 tg3_free_rings(tp);
5357 return -ENOMEM;
5358 }
5359 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005361 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005362 }
5363 }
Michael Chan32d8c572006-07-25 16:38:29 -07005364 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005365}
5366
5367/*
5368 * Must not be invoked with interrupt sources disabled and
5369 * the hardware shut down.
5370 */
5371static void tg3_free_consistent(struct tg3 *tp)
5372{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005373 kfree(tp->rx_std_buffers);
5374 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375 if (tp->rx_std) {
5376 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5377 tp->rx_std, tp->rx_std_mapping);
5378 tp->rx_std = NULL;
5379 }
5380 if (tp->rx_jumbo) {
5381 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5382 tp->rx_jumbo, tp->rx_jumbo_mapping);
5383 tp->rx_jumbo = NULL;
5384 }
5385 if (tp->rx_rcb) {
5386 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5387 tp->rx_rcb, tp->rx_rcb_mapping);
5388 tp->rx_rcb = NULL;
5389 }
5390 if (tp->tx_ring) {
5391 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5392 tp->tx_ring, tp->tx_desc_mapping);
5393 tp->tx_ring = NULL;
5394 }
5395 if (tp->hw_status) {
5396 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5397 tp->hw_status, tp->status_mapping);
5398 tp->hw_status = NULL;
5399 }
5400 if (tp->hw_stats) {
5401 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5402 tp->hw_stats, tp->stats_mapping);
5403 tp->hw_stats = NULL;
5404 }
5405}
5406
5407/*
5408 * Must not be invoked with interrupt sources disabled and
5409 * the hardware shut down. Can sleep.
5410 */
5411static int tg3_alloc_consistent(struct tg3 *tp)
5412{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005413 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 (TG3_RX_RING_SIZE +
5415 TG3_RX_JUMBO_RING_SIZE)) +
5416 (sizeof(struct tx_ring_info) *
5417 TG3_TX_RING_SIZE),
5418 GFP_KERNEL);
5419 if (!tp->rx_std_buffers)
5420 return -ENOMEM;
5421
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5423 tp->tx_buffers = (struct tx_ring_info *)
5424 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5425
5426 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5427 &tp->rx_std_mapping);
5428 if (!tp->rx_std)
5429 goto err_out;
5430
5431 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5432 &tp->rx_jumbo_mapping);
5433
5434 if (!tp->rx_jumbo)
5435 goto err_out;
5436
5437 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5438 &tp->rx_rcb_mapping);
5439 if (!tp->rx_rcb)
5440 goto err_out;
5441
5442 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5443 &tp->tx_desc_mapping);
5444 if (!tp->tx_ring)
5445 goto err_out;
5446
5447 tp->hw_status = pci_alloc_consistent(tp->pdev,
5448 TG3_HW_STATUS_SIZE,
5449 &tp->status_mapping);
5450 if (!tp->hw_status)
5451 goto err_out;
5452
5453 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5454 sizeof(struct tg3_hw_stats),
5455 &tp->stats_mapping);
5456 if (!tp->hw_stats)
5457 goto err_out;
5458
5459 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5460 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5461
5462 return 0;
5463
5464err_out:
5465 tg3_free_consistent(tp);
5466 return -ENOMEM;
5467}
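
/* Layout note: the single kzalloc() above carries the standard and jumbo
 * ring_info arrays followed by the tx_ring_info array, and
 * rx_jumbo_buffers and tx_buffers simply point into that one block.
 * This is why tg3_free_consistent() kfree()s only rx_std_buffers.  The
 * allocation size is:
 *
 *	sizeof(struct ring_info) *
 *		(TG3_RX_RING_SIZE + TG3_RX_JUMBO_RING_SIZE) +
 *	sizeof(struct tx_ring_info) * TG3_TX_RING_SIZE
 */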
5468
5469#define MAX_WAIT_CNT 1000
5470
5471/* To stop a block, clear the enable bit and poll till it
5472 * clears. tp->lock is held.
5473 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005474static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475{
5476 unsigned int i;
5477 u32 val;
5478
5479 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5480 switch (ofs) {
5481 case RCVLSC_MODE:
5482 case DMAC_MODE:
5483 case MBFREE_MODE:
5484 case BUFMGR_MODE:
5485 case MEMARB_MODE:
5486 /* We can't enable/disable these bits of the
5487 * 5705/5750, just say success.
5488 */
5489 return 0;
5490
5491 default:
5492 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005493 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005494 }
5495
5496 val = tr32(ofs);
5497 val &= ~enable_bit;
5498 tw32_f(ofs, val);
5499
5500 for (i = 0; i < MAX_WAIT_CNT; i++) {
5501 udelay(100);
5502 val = tr32(ofs);
5503 if ((val & enable_bit) == 0)
5504 break;
5505 }
5506
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005507 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005508 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5509 "ofs=%lx enable_bit=%x\n",
5510 ofs, enable_bit);
5511 return -ENODEV;
5512 }
5513
5514 return 0;
5515}
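
/* Timing note: with MAX_WAIT_CNT at 1000 and udelay(100) per iteration,
 * the loop above polls for up to roughly 100ms.  On timeout the function
 * logs the failure and returns -ENODEV only when "silent" is clear; a
 * silent timeout falls through and returns 0.
 */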
5516
5517/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005518static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005519{
5520 int i, err;
5521
5522 tg3_disable_ints(tp);
5523
5524 tp->rx_mode &= ~RX_MODE_ENABLE;
5525 tw32_f(MAC_RX_MODE, tp->rx_mode);
5526 udelay(10);
5527
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005528 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5529 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5530 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5531 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5532 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5533 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005535 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5536 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5537 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5538 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5539 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5540 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5541 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005542
5543 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5544 tw32_f(MAC_MODE, tp->mac_mode);
5545 udelay(40);
5546
5547 tp->tx_mode &= ~TX_MODE_ENABLE;
5548 tw32_f(MAC_TX_MODE, tp->tx_mode);
5549
5550 for (i = 0; i < MAX_WAIT_CNT; i++) {
5551 udelay(100);
5552 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5553 break;
5554 }
5555 if (i >= MAX_WAIT_CNT) {
5556 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5557 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5558 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005559 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560 }
5561
Michael Chane6de8ad2005-05-05 14:42:41 -07005562 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005563 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5564 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565
5566 tw32(FTQ_RESET, 0xffffffff);
5567 tw32(FTQ_RESET, 0x00000000);
5568
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005569 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5570 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005571
5572 if (tp->hw_status)
5573 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5574 if (tp->hw_stats)
5575 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5576
Linus Torvalds1da177e2005-04-16 15:20:36 -07005577 return err;
5578}
5579
5580/* tp->lock is held. */
5581static int tg3_nvram_lock(struct tg3 *tp)
5582{
5583 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5584 int i;
5585
Michael Chanec41c7d2006-01-17 02:40:55 -08005586 if (tp->nvram_lock_cnt == 0) {
5587 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5588 for (i = 0; i < 8000; i++) {
5589 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5590 break;
5591 udelay(20);
5592 }
5593 if (i == 8000) {
5594 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5595 return -ENODEV;
5596 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005597 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005598 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005599 }
5600 return 0;
5601}
5602
5603/* tp->lock is held. */
5604static void tg3_nvram_unlock(struct tg3 *tp)
5605{
Michael Chanec41c7d2006-01-17 02:40:55 -08005606 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5607 if (tp->nvram_lock_cnt > 0)
5608 tp->nvram_lock_cnt--;
5609 if (tp->nvram_lock_cnt == 0)
5610 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5611 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005612}
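
/* Minimal usage sketch, assuming tp->lock is already held as required:
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		... NVRAM accesses ...
 *		tg3_nvram_unlock(tp);
 *	}
 *
 * nvram_lock_cnt makes nested lock/unlock pairs safe: the SWARB request
 * is only issued on the first lock and only cleared when the count drops
 * back to zero.  tg3_chip_reset() later zeroes the count instead of
 * unlocking, since the chip reset itself drops the hardware arbitration.
 */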
5613
5614/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005615static void tg3_enable_nvram_access(struct tg3 *tp)
5616{
5617 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5618 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5619 u32 nvaccess = tr32(NVRAM_ACCESS);
5620
5621 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5622 }
5623}
5624
5625/* tp->lock is held. */
5626static void tg3_disable_nvram_access(struct tg3 *tp)
5627{
5628 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5629 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5630 u32 nvaccess = tr32(NVRAM_ACCESS);
5631
5632 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5633 }
5634}
5635
Matt Carlson0d3031d2007-10-10 18:02:43 -07005636static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5637{
5638 int i;
5639 u32 apedata;
5640
5641 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5642 if (apedata != APE_SEG_SIG_MAGIC)
5643 return;
5644
5645 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005646 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005647 return;
5648
5649 /* Wait for up to 1 millisecond for APE to service previous event. */
5650 for (i = 0; i < 10; i++) {
5651 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5652 return;
5653
5654 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5655
5656 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5657 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5658 event | APE_EVENT_STATUS_EVENT_PENDING);
5659
5660 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5661
5662 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5663 break;
5664
5665 udelay(100);
5666 }
5667
5668 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5669 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5670}
5671
5672static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5673{
5674 u32 event;
5675 u32 apedata;
5676
5677 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5678 return;
5679
5680 switch (kind) {
5681 case RESET_KIND_INIT:
5682 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5683 APE_HOST_SEG_SIG_MAGIC);
5684 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5685 APE_HOST_SEG_LEN_MAGIC);
5686 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5687 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5688 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5689 APE_HOST_DRIVER_ID_MAGIC);
5690 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5691 APE_HOST_BEHAV_NO_PHYLOCK);
5692
5693 event = APE_EVENT_STATUS_STATE_START;
5694 break;
5695 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005696 /* With the interface we are currently using,
5697 * APE does not track driver state. Wiping
5698 * out the HOST SEGMENT SIGNATURE forces
5699 * the APE to assume OS absent status.
5700 */
5701 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5702
Matt Carlson0d3031d2007-10-10 18:02:43 -07005703 event = APE_EVENT_STATUS_STATE_UNLOAD;
5704 break;
5705 case RESET_KIND_SUSPEND:
5706 event = APE_EVENT_STATUS_STATE_SUSPEND;
5707 break;
5708 default:
5709 return;
5710 }
5711
5712 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5713
5714 tg3_ape_send_event(tp, event);
5715}
5716
Michael Chane6af3012005-04-21 17:12:05 -07005717/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005718static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5719{
David S. Millerf49639e2006-06-09 11:58:36 -07005720 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5721 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005722
5723 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5724 switch (kind) {
5725 case RESET_KIND_INIT:
5726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5727 DRV_STATE_START);
5728 break;
5729
5730 case RESET_KIND_SHUTDOWN:
5731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5732 DRV_STATE_UNLOAD);
5733 break;
5734
5735 case RESET_KIND_SUSPEND:
5736 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5737 DRV_STATE_SUSPEND);
5738 break;
5739
5740 default:
5741 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005743 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005744
5745 if (kind == RESET_KIND_INIT ||
5746 kind == RESET_KIND_SUSPEND)
5747 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005748}
5749
5750/* tp->lock is held. */
5751static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5752{
5753 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5754 switch (kind) {
5755 case RESET_KIND_INIT:
5756 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5757 DRV_STATE_START_DONE);
5758 break;
5759
5760 case RESET_KIND_SHUTDOWN:
5761 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5762 DRV_STATE_UNLOAD_DONE);
5763 break;
5764
5765 default:
5766 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005767 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005768 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005769
5770 if (kind == RESET_KIND_SHUTDOWN)
5771 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005772}
5773
5774/* tp->lock is held. */
5775static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5776{
5777 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5778 switch (kind) {
5779 case RESET_KIND_INIT:
5780 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5781 DRV_STATE_START);
5782 break;
5783
5784 case RESET_KIND_SHUTDOWN:
5785 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5786 DRV_STATE_UNLOAD);
5787 break;
5788
5789 case RESET_KIND_SUSPEND:
5790 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5791 DRV_STATE_SUSPEND);
5792 break;
5793
5794 default:
5795 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005796 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797 }
5798}
5799
Michael Chan7a6f4362006-09-27 16:03:31 -07005800static int tg3_poll_fw(struct tg3 *tp)
5801{
5802 int i;
5803 u32 val;
5804
Michael Chanb5d37722006-09-27 16:06:21 -07005805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005806 /* Wait up to 20ms for init done. */
5807 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005808 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5809 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005810 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005811 }
5812 return -ENODEV;
5813 }
5814
Michael Chan7a6f4362006-09-27 16:03:31 -07005815 /* Wait for firmware initialization to complete. */
5816 for (i = 0; i < 100000; i++) {
5817 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5818 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5819 break;
5820 udelay(10);
5821 }
5822
5823 /* Chip might not be fitted with firmware. Some Sun onboard
5824 * parts are configured like that. So don't signal the timeout
5825 * of the above loop as an error, but do report the lack of
5826 * running firmware once.
5827 */
5828 if (i >= 100000 &&
5829 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5830 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5831
5832 printk(KERN_INFO PFX "%s: No firmware running.\n",
5833 tp->dev->name);
5834 }
5835
5836 return 0;
5837}
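
/* Timing, for reference: the 5906 path polls VCPU_STATUS 200 times with
 * udelay(100), i.e. the 20ms the comment promises, while the generic
 * path polls the firmware mailbox up to 100000 times with udelay(10),
 * roughly one second, before deciding that no firmware is running.
 */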
5838
Michael Chanee6a99b2007-07-18 21:49:10 -07005839/* Save PCI command register before chip reset */
5840static void tg3_save_pci_state(struct tg3 *tp)
5841{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005842 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005843}
5844
5845/* Restore PCI state after chip reset */
5846static void tg3_restore_pci_state(struct tg3 *tp)
5847{
5848 u32 val;
5849
5850 /* Re-enable indirect register accesses. */
5851 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5852 tp->misc_host_ctrl);
5853
5854 /* Set MAX PCI retry to zero. */
5855 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5856 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5857 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5858 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005859 /* Allow reads and writes to the APE register and memory space. */
5860 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5861 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5862 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005863 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5864
Matt Carlson8a6eac92007-10-21 16:17:55 -07005865 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005866
Matt Carlsonfcb389d2008-11-03 16:55:44 -08005867 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5868 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5869 pcie_set_readrq(tp->pdev, 4096);
5870 else {
5871 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5872 tp->pci_cacheline_sz);
5873 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5874 tp->pci_lat_timer);
5875 }
Michael Chan114342f2007-10-15 02:12:26 -07005876 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005877
Michael Chanee6a99b2007-07-18 21:49:10 -07005878 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005879 if (tp->pcix_cap) {
5880 u16 pcix_cmd;
5881
5882 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5883 &pcix_cmd);
5884 pcix_cmd &= ~PCI_X_CMD_ERO;
5885 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5886 pcix_cmd);
5887 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005888
5889 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005890
5891 /* Chip reset on 5780 will reset the MSI enable bit,
5892 * so we need to restore it.
5893 */
5894 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5895 u16 ctrl;
5896
5897 pci_read_config_word(tp->pdev,
5898 tp->msi_cap + PCI_MSI_FLAGS,
5899 &ctrl);
5900 pci_write_config_word(tp->pdev,
5901 tp->msi_cap + PCI_MSI_FLAGS,
5902 ctrl | PCI_MSI_FLAGS_ENABLE);
5903 val = tr32(MSGINT_MODE);
5904 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5905 }
5906 }
5907}
5908
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909static void tg3_stop_fw(struct tg3 *);
5910
5911/* tp->lock is held. */
5912static int tg3_chip_reset(struct tg3 *tp)
5913{
5914 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005915 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005916 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005917
David S. Millerf49639e2006-06-09 11:58:36 -07005918 tg3_nvram_lock(tp);
5919
Matt Carlson158d7ab2008-05-29 01:37:54 -07005920 tg3_mdio_stop(tp);
5921
Matt Carlson77b483f2008-08-15 14:07:24 -07005922 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5923
David S. Millerf49639e2006-06-09 11:58:36 -07005924 /* No matching tg3_nvram_unlock() after this because
5925 * chip reset below will undo the nvram lock.
5926 */
5927 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005928
Michael Chanee6a99b2007-07-18 21:49:10 -07005929 /* GRC_MISC_CFG core clock reset will clear the memory
5930 * enable bit in PCI register 4 and the MSI enable bit
5931 * on some chips, so we save relevant registers here.
5932 */
5933 tg3_save_pci_state(tp);
5934
Michael Chand9ab5ad2006-03-20 22:27:35 -08005935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5940 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005941 tw32(GRC_FASTBOOT_PC, 0);
5942
Linus Torvalds1da177e2005-04-16 15:20:36 -07005943 /*
5944 * We must avoid the readl() that normally takes place.
5945 * It locks machines, causes machine checks, and other
5946 * fun things. So, temporarily disable the 5701
5947 * hardware workaround, while we do the reset.
5948 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005949 write_op = tp->write32;
5950 if (write_op == tg3_write_flush_reg32)
5951 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005952
Michael Chand18edcb2007-03-24 20:57:11 -07005953 /* Prevent the irq handler from reading or writing PCI registers
5954 * during chip reset when the memory enable bit in the PCI command
5955 * register may be cleared. The chip does not generate interrupt
5956 * register may be cleared. The chip does not generate interrupts
5957 * sharing or irqpoll.
5958 */
5959 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005960 if (tp->hw_status) {
5961 tp->hw_status->status = 0;
5962 tp->hw_status->status_tag = 0;
5963 }
Michael Chand18edcb2007-03-24 20:57:11 -07005964 tp->last_tag = 0;
5965 smp_mb();
5966 synchronize_irq(tp->pdev->irq);
5967
Linus Torvalds1da177e2005-04-16 15:20:36 -07005968 /* do the reset */
5969 val = GRC_MISC_CFG_CORECLK_RESET;
5970
5971 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5972 if (tr32(0x7e2c) == 0x60) {
5973 tw32(0x7e2c, 0x20);
5974 }
5975 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5976 tw32(GRC_MISC_CFG, (1 << 29));
5977 val |= (1 << 29);
5978 }
5979 }
5980
Michael Chanb5d37722006-09-27 16:06:21 -07005981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5982 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5983 tw32(GRC_VCPU_EXT_CTRL,
5984 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5985 }
5986
Linus Torvalds1da177e2005-04-16 15:20:36 -07005987 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5988 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5989 tw32(GRC_MISC_CFG, val);
5990
Michael Chan1ee582d2005-08-09 20:16:46 -07005991 /* restore 5701 hardware bug workaround write method */
5992 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005993
5994 /* Unfortunately, we have to delay before the PCI read back.
5995 * Some 575X chips will not even respond to a PCI cfg access
5996 * when the reset command is given to the chip.
5997 *
5998 * How do these hardware designers expect things to work
5999 * properly if the PCI write is posted for a long period
6000 * of time? It is always necessary to have some method by
6001 * which a register read back can occur to push the write
6002 * out which does the reset.
6003 *
6004 * For most tg3 variants the trick below was working.
6005 * Ho hum...
6006 */
6007 udelay(120);
6008
6009 /* Flush PCI posted writes. The normal MMIO registers
6010 * are inaccessible at this time so this is the only
6011 * way to do this reliably (actually, this is no longer
6012 * the case, see above). I tried to use indirect
6013 * register read/write but this upset some 5701 variants.
6014 */
6015 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6016
6017 udelay(120);
6018
6019 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6020 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6021 int i;
6022 u32 cfg_val;
6023
6024 /* Wait for link training to complete. */
6025 for (i = 0; i < 5000; i++)
6026 udelay(100);
6027
6028 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6029 pci_write_config_dword(tp->pdev, 0xc4,
6030 cfg_val | (1 << 15));
6031 }
Matt Carlsonfcb389d2008-11-03 16:55:44 -08006032 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
6033 /* Set PCIE max payload size and clear error status. */
6034 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006035 }
6036
Michael Chanee6a99b2007-07-18 21:49:10 -07006037 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006038
Michael Chand18edcb2007-03-24 20:57:11 -07006039 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6040
Michael Chanee6a99b2007-07-18 21:49:10 -07006041 val = 0;
6042 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07006043 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07006044 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006045
6046 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6047 tg3_stop_fw(tp);
6048 tw32(0x5000, 0x400);
6049 }
6050
6051 tw32(GRC_MODE, tp->grc_mode);
6052
6053 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006054 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006055
6056 tw32(0xc4, val | (1 << 15));
6057 }
6058
6059 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6061 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6062 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6063 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6064 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6065 }
6066
6067 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6068 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6069 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07006070 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6071 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6072 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07006073 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6074 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6075 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6076 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6077 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006078 } else
6079 tw32_f(MAC_MODE, 0);
6080 udelay(40);
6081
Matt Carlson158d7ab2008-05-29 01:37:54 -07006082 tg3_mdio_start(tp);
6083
Matt Carlson77b483f2008-08-15 14:07:24 -07006084 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6085
Michael Chan7a6f4362006-09-27 16:03:31 -07006086 err = tg3_poll_fw(tp);
6087 if (err)
6088 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006089
6090 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6091 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006092 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006093
6094 tw32(0x7c00, val | (1 << 25));
6095 }
6096
6097 /* Reprobe ASF enable state. */
6098 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6099 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6100 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6101 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6102 u32 nic_cfg;
6103
6104 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6105 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6106 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006107 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006108 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006109 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6110 }
6111 }
6112
6113 return 0;
6114}
6115
6116/* tp->lock is held. */
6117static void tg3_stop_fw(struct tg3 *tp)
6118{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006119 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6120 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006121 /* Wait for RX cpu to ACK the previous event. */
6122 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006123
6124 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006125
6126 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006127
Matt Carlson7c5026a2008-05-02 16:49:29 -07006128 /* Wait for RX cpu to ACK this event. */
6129 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006130 }
6131}
6132
6133/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006134static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135{
6136 int err;
6137
6138 tg3_stop_fw(tp);
6139
Michael Chan944d9802005-05-29 14:57:48 -07006140 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006141
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006142 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006143 err = tg3_chip_reset(tp);
6144
Michael Chan944d9802005-05-29 14:57:48 -07006145 tg3_write_sig_legacy(tp, kind);
6146 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006147
6148 if (err)
6149 return err;
6150
6151 return 0;
6152}
6153
6154#define TG3_FW_RELEASE_MAJOR 0x0
6155#define TG3_FW_RELEASE_MINOR	0x0
6156#define TG3_FW_RELEASE_FIX 0x0
6157#define TG3_FW_START_ADDR 0x08000000
6158#define TG3_FW_TEXT_ADDR 0x08000000
6159#define TG3_FW_TEXT_LEN 0x9c0
6160#define TG3_FW_RODATA_ADDR 0x080009c0
6161#define TG3_FW_RODATA_LEN 0x60
6162#define TG3_FW_DATA_ADDR 0x08000a40
6163#define TG3_FW_DATA_LEN 0x20
6164#define TG3_FW_SBSS_ADDR 0x08000a60
6165#define TG3_FW_SBSS_LEN 0xc
6166#define TG3_FW_BSS_ADDR 0x08000a70
6167#define TG3_FW_BSS_LEN 0x10
6168
Andreas Mohr50da8592006-08-14 23:54:30 -07006169static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006170 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6171 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6172 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6173 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6174 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6175 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6176 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6177 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6178 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6179 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6180 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6181 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6182 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6183 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6184 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6185 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6186 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6187 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6188 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6189 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6190 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6191 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6192 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6193 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6194 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6195 0, 0, 0, 0, 0, 0,
6196 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6197 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6198 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6199 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6200 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6201 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6202 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6203 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6204 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6205 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6206 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6207 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6208 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6209 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6210 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6211 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6212 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6213 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6214 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6215 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6216 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6217 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6218 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6219 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6220 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6221 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6222 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6223 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6224 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6225 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6226 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6227 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6228 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6229 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6230 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6231 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6232 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6233 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6234 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6235 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6236 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6237 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6238 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6239 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6240 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6241 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6242 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6243 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6244 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6245 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6246 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6247 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6248 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6249 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6250 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6251 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6252 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6253 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6254 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6255 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6256 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6257 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6258 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6259 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6260 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6261};
6262
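/* Note: the read-only data below appears to be mostly ASCII tags
 * ("5701rlsA", "SwEvent0", "UnknEvnt", "fatalErr", "MainCpuB", ...)
 * that the firmware text above references when reporting events and
 * errors.
 */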
Andreas Mohr50da8592006-08-14 23:54:30 -07006263static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006264 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6265 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6266 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6267 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6268 0x00000000
6269};
6270
6271#if 0 /* All zeros, don't eat up space with it. */
6272u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6273 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6274 0x00000000, 0x00000000, 0x00000000, 0x00000000
6275};
6276#endif
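
/* Because the data section is all zeros, tg3_load_5701_a0_firmware_fix()
 * below passes a NULL data pointer and relies on the loader's zero-fill
 * of sections whose data pointer is NULL.
 */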
6277
6278#define RX_CPU_SCRATCH_BASE 0x30000
6279#define RX_CPU_SCRATCH_SIZE 0x04000
6280#define TX_CPU_SCRATCH_BASE 0x34000
6281#define TX_CPU_SCRATCH_SIZE 0x04000
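
/* Illustrative sketch only (not part of the driver): a compile-time sanity
 * check that the 5701 A0 firmware sections defined above fit in the RX CPU
 * scratch area they are copied into.  BUILD_BUG_ON() is the standard kernel
 * helper; tg3_fw_layout_check() is a hypothetical name used purely for
 * illustration.
 */
#if 0
static inline void tg3_fw_layout_check(void)
{
	BUILD_BUG_ON(TG3_FW_TEXT_LEN + TG3_FW_RODATA_LEN + TG3_FW_DATA_LEN +
		     TG3_FW_SBSS_LEN + TG3_FW_BSS_LEN > RX_CPU_SCRATCH_SIZE);
}
#endif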
6282
6283/* tp->lock is held. */
6284static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6285{
6286 int i;
6287
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006288 BUG_ON(offset == TX_CPU_BASE &&
6289 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006290
Michael Chanb5d37722006-09-27 16:06:21 -07006291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6292 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6293
6294 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6295 return 0;
6296 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297 if (offset == RX_CPU_BASE) {
6298 for (i = 0; i < 10000; i++) {
6299 tw32(offset + CPU_STATE, 0xffffffff);
6300 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6301 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6302 break;
6303 }
6304
6305 tw32(offset + CPU_STATE, 0xffffffff);
6306 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6307 udelay(10);
6308 } else {
6309 for (i = 0; i < 10000; i++) {
6310 tw32(offset + CPU_STATE, 0xffffffff);
6311 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6312 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6313 break;
6314 }
6315 }
6316
6317 if (i >= 10000) {
6318		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s "
6319		       "(%s CPU)\n",
6320 tp->dev->name,
6321 (offset == RX_CPU_BASE ? "RX" : "TX"));
6322 return -ENODEV;
6323 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006324
6325 /* Clear firmware's nvram arbitration. */
6326 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6327 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006328 return 0;
6329}
6330
6331struct fw_info {
6332 unsigned int text_base;
6333 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006334 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006335 unsigned int rodata_base;
6336 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006337 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006338 unsigned int data_base;
6339 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006340 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006341};
6342
6343/* tp->lock is held. */
6344static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6345 int cpu_scratch_size, struct fw_info *info)
6346{
Michael Chanec41c7d2006-01-17 02:40:55 -08006347 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006348 void (*write_op)(struct tg3 *, u32, u32);
6349
6350 if (cpu_base == TX_CPU_BASE &&
6351 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6352 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6353		       "TX cpu firmware on %s, which is a 5705-class device.\n",
6354 tp->dev->name);
6355 return -EINVAL;
6356 }
6357
6358 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6359 write_op = tg3_write_mem;
6360 else
6361 write_op = tg3_write_indirect_reg32;
6362
Michael Chan1b628152005-05-29 14:59:49 -07006363 /* It is possible that bootcode is still loading at this point.
6364	 * Get the nvram lock first, then halt the cpu.
6365 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006366 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006367 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006368 if (!lock_err)
6369 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006370 if (err)
6371 goto out;
6372
6373 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6374 write_op(tp, cpu_scratch_base + i, 0);
6375 tw32(cpu_base + CPU_STATE, 0xffffffff);
6376 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6377 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6378 write_op(tp, (cpu_scratch_base +
6379 (info->text_base & 0xffff) +
6380 (i * sizeof(u32))),
6381 (info->text_data ?
6382 info->text_data[i] : 0));
6383 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6384 write_op(tp, (cpu_scratch_base +
6385 (info->rodata_base & 0xffff) +
6386 (i * sizeof(u32))),
6387 (info->rodata_data ?
6388 info->rodata_data[i] : 0));
6389 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6390 write_op(tp, (cpu_scratch_base +
6391 (info->data_base & 0xffff) +
6392 (i * sizeof(u32))),
6393 (info->data_data ?
6394 info->data_data[i] : 0));
6395
6396 err = 0;
6397
6398out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006399 return err;
6400}
6401
6402/* tp->lock is held. */
6403static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6404{
6405 struct fw_info info;
6406 int err, i;
6407
6408 info.text_base = TG3_FW_TEXT_ADDR;
6409 info.text_len = TG3_FW_TEXT_LEN;
6410 info.text_data = &tg3FwText[0];
6411 info.rodata_base = TG3_FW_RODATA_ADDR;
6412 info.rodata_len = TG3_FW_RODATA_LEN;
6413 info.rodata_data = &tg3FwRodata[0];
6414 info.data_base = TG3_FW_DATA_ADDR;
6415 info.data_len = TG3_FW_DATA_LEN;
6416 info.data_data = NULL;
6417
6418 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6419 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6420 &info);
6421 if (err)
6422 return err;
6423
6424 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6425 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6426 &info);
6427 if (err)
6428 return err;
6429
6430	/* Now start up only the RX cpu. */
6431 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6432 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6433
6434 for (i = 0; i < 5; i++) {
6435 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6436 break;
6437 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6438 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6439 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6440 udelay(1000);
6441 }
6442 if (i >= 5) {
6443		printk(KERN_ERR PFX "tg3_load_firmware failed to set RX CPU PC "
6444		       "for %s: is %08x, should be %08x\n",
6445 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6446 TG3_FW_TEXT_ADDR);
6447 return -ENODEV;
6448 }
6449 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6450 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6451
6452 return 0;
6453}
6454
Linus Torvalds1da177e2005-04-16 15:20:36 -07006455
6456#define TG3_TSO_FW_RELEASE_MAJOR 0x1
6457#define TG3_TSO_FW_RELEASE_MINOR	0x6
6458#define TG3_TSO_FW_RELEASE_FIX 0x0
6459#define TG3_TSO_FW_START_ADDR 0x08000000
6460#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6461#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6462#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6463#define TG3_TSO_FW_RODATA_LEN 0x60
6464#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6465#define TG3_TSO_FW_DATA_LEN 0x30
6466#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6467#define TG3_TSO_FW_SBSS_LEN 0x2c
6468#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6469#define TG3_TSO_FW_BSS_LEN 0x894
6470
Andreas Mohr50da8592006-08-14 23:54:30 -07006471static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006472 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6473 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6474 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6475 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6476 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6477 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6478 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6479 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6480 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6481 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6482 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6483 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6484 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6485 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6486 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6487 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6488 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6489 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6490 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6491 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6492 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6493 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6494 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6495 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6496 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6497 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6498 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6499 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6500 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6501 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6502 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6503 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6504 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6505 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6506 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6507 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6508 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6509 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6510 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6511 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6512 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6513 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6514 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6515 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6516 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6517 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6518 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6519 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6520 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6521 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6522 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6523 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6524 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6525 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6526 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6527 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6528 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6529 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6530 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6531 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6532 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6533 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6534 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6535 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6536 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6537 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6538 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6539 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6540 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6541 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6542 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6543 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6544 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6545 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6546 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6547 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6548 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6549 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6550 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6551 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6552 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6553 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6554 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6555 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6556 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6557 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6558 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6559 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6560 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6561 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6562 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6563 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6564 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6565 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6566 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6567 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6568 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6569 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6570 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6571 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6572 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6573 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6574 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6575 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6576 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6577 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6578 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6579 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6580 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6581 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6582 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6583 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6584 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6585 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6586 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6587 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6588 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6589 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6590 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6591 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6592 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6593 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6594 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6595 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6596 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6597 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6598 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6599 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6600 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6601 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6602 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6603 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6604 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6605 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6606 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6607 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6608 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6609 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6610 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6611 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6612 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6613 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6614 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6615 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6616 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6617 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6618 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6619 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6620 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6621 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6622 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6623 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6624 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6625 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6626 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6627 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6628 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6629 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6630 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6631 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6632 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6633 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6634 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6635 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6636 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6637 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6638 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6639 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6640 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6641 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6642 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6643 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6644 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6645 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6646 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6647 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6648 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6649 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6650 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6651 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6652 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6653 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6654 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6655 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6656 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6657 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6658 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6659 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6660 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6661 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6662 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6663 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6664 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6665 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6666 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6667 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6668 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6669 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6670 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6671 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6672 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6673 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6674 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6675 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6676 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6677 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6678 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6679 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6680 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6681 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6682 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6683 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6684 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6685 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6686 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6687 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6688 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6689 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6690 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6691 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6692 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6693 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6694 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6695 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6696 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6697 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6698 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6699 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6700 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6701 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6702 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6703 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6704 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6705 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6706 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6707 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6708 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6709 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6710 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6711 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6712 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6713 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6714 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6715 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6716 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6717 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6718 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6719 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6720 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6721 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6722 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6723 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6724 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6725 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6726 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6727 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6728 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6729 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6730 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6731 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6732 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6733 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6734 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6735 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6736 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6737 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6738 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6739 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6740 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6741 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6742 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6743 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6744 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6745 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6746 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6747 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6748 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6749 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6750 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6751 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6752 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6753 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6754 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6755 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6756};
6757
Andreas Mohr50da8592006-08-14 23:54:30 -07006758static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006759 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6760 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6761 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6762 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6763 0x00000000,
6764};
6765
Andreas Mohr50da8592006-08-14 23:54:30 -07006766static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006767 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6768 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6769 0x00000000,
6770};
6771
6772/* 5705 needs a special version of the TSO firmware. */
6773#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6774#define TG3_TSO5_FW_RELEASE_MINOR	0x2
6775#define TG3_TSO5_FW_RELEASE_FIX 0x0
6776#define TG3_TSO5_FW_START_ADDR 0x00010000
6777#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6778#define TG3_TSO5_FW_TEXT_LEN 0xe90
6779#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6780#define TG3_TSO5_FW_RODATA_LEN 0x50
6781#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6782#define TG3_TSO5_FW_DATA_LEN 0x20
6783#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6784#define TG3_TSO5_FW_SBSS_LEN 0x28
6785#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6786#define TG3_TSO5_FW_BSS_LEN 0x88
6787
Andreas Mohr50da8592006-08-14 23:54:30 -07006788static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006789 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6790 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6791 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6792 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6793 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6794 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6795 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6796 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6797 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6798 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6799 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6800 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6801 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6802 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6803 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6804 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6805 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6806 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6807 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6808 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6809 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6810 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6811 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6812 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6813 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6814 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6815 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6816 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6817 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6818 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6819 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6820 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6821 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6822 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6823 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6824 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6825 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6826 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6827 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6828 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6829 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6830 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6831 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6832 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6833 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6834 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6835 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6836 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6837 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6838 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6839 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6840 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6841 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6842 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6843 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6844 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6845 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6846 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6847 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6848 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6849 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6850 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6851 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6852 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6853 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6854 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6855 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6856 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6857 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6858 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6859 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6860 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6861 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6862 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6863 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6864 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6865 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6866 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6867 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6868 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6869 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6870 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6871 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6872 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6873 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6874 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6875 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6876 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6877 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6878 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6879 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6880 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6881 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6882 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6883 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6884 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6885 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6886 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6887 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6888 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6889 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6890 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6891 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6892 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6893 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6894 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6895 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6896 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6897 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6898 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6899 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6900 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6901 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6902 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6903 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6904 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6905 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6906 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6907 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6908 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6909 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6910 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6911 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6912 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6913 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6914 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6915 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6916 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6917 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6918 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6919 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6920 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6921 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6922 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6923 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6924 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6925 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6926 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6927 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6928 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6929 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6930 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6931 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6932 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6933 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6934 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6935 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6936 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6937 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6938 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6939 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6940 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6941 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6942 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6943 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6944 0x00000000, 0x00000000, 0x00000000,
6945};
6946
Andreas Mohr50da8592006-08-14 23:54:30 -07006947static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006948 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6949 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6950 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6951 0x00000000, 0x00000000, 0x00000000,
6952};
6953
Andreas Mohr50da8592006-08-14 23:54:30 -07006954static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006955 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6956 0x00000000, 0x00000000, 0x00000000,
6957};
6958
6959/* tp->lock is held. */
6960static int tg3_load_tso_firmware(struct tg3 *tp)
6961{
6962 struct fw_info info;
6963 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6964 int err, i;
6965
6966 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6967 return 0;
6968
6969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6970 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6971 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6972 info.text_data = &tg3Tso5FwText[0];
6973 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6974 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6975 info.rodata_data = &tg3Tso5FwRodata[0];
6976 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6977 info.data_len = TG3_TSO5_FW_DATA_LEN;
6978 info.data_data = &tg3Tso5FwData[0];
6979 cpu_base = RX_CPU_BASE;
6980 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6981 cpu_scratch_size = (info.text_len +
6982 info.rodata_len +
6983 info.data_len +
6984 TG3_TSO5_FW_SBSS_LEN +
6985 TG3_TSO5_FW_BSS_LEN);
6986 } else {
6987 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6988 info.text_len = TG3_TSO_FW_TEXT_LEN;
6989 info.text_data = &tg3TsoFwText[0];
6990 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6991 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6992 info.rodata_data = &tg3TsoFwRodata[0];
6993 info.data_base = TG3_TSO_FW_DATA_ADDR;
6994 info.data_len = TG3_TSO_FW_DATA_LEN;
6995 info.data_data = &tg3TsoFwData[0];
6996 cpu_base = TX_CPU_BASE;
6997 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6998 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6999 }
7000
7001 err = tg3_load_firmware_cpu(tp, cpu_base,
7002 cpu_scratch_base, cpu_scratch_size,
7003 &info);
7004 if (err)
7005 return err;
7006
7007	/* Now start up the CPU. */
7008 tw32(cpu_base + CPU_STATE, 0xffffffff);
7009 tw32_f(cpu_base + CPU_PC, info.text_base);
7010
7011 for (i = 0; i < 5; i++) {
7012 if (tr32(cpu_base + CPU_PC) == info.text_base)
7013 break;
7014 tw32(cpu_base + CPU_STATE, 0xffffffff);
7015 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7016 tw32_f(cpu_base + CPU_PC, info.text_base);
7017 udelay(1000);
7018 }
7019 if (i >= 5) {
7020 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7021 "to set CPU PC, is %08x should be %08x\n",
7022 tp->dev->name, tr32(cpu_base + CPU_PC),
7023 info.text_base);
7024 return -ENODEV;
7025 }
7026 tw32(cpu_base + CPU_STATE, 0xffffffff);
7027 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7028 return 0;
7029}
7030
Linus Torvalds1da177e2005-04-16 15:20:36 -07007031
Linus Torvalds1da177e2005-04-16 15:20:36 -07007032static int tg3_set_mac_addr(struct net_device *dev, void *p)
7033{
7034 struct tg3 *tp = netdev_priv(dev);
7035 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07007036 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007037
Michael Chanf9804dd2005-09-27 12:13:10 -07007038 if (!is_valid_ether_addr(addr->sa_data))
7039 return -EINVAL;
7040
Linus Torvalds1da177e2005-04-16 15:20:36 -07007041 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7042
Michael Chane75f7c92006-03-20 21:33:26 -08007043 if (!netif_running(dev))
7044 return 0;
7045
Michael Chan58712ef2006-04-29 18:58:01 -07007046 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07007047 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07007048
Michael Chan986e0ae2007-05-05 12:10:20 -07007049 addr0_high = tr32(MAC_ADDR_0_HIGH);
7050 addr0_low = tr32(MAC_ADDR_0_LOW);
7051 addr1_high = tr32(MAC_ADDR_1_HIGH);
7052 addr1_low = tr32(MAC_ADDR_1_LOW);
7053
7054 /* Skip MAC addr 1 if ASF is using it. */
7055 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7056 !(addr1_high == 0 && addr1_low == 0))
7057 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07007058 }
Michael Chan986e0ae2007-05-05 12:10:20 -07007059 spin_lock_bh(&tp->lock);
7060 __tg3_set_mac_addr(tp, skip_mac_1);
7061 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007062
Michael Chanb9ec6c12006-07-25 16:37:27 -07007063 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007064}
7065
7066/* tp->lock is held. */
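/* Write one TG3_BDINFO ring control block into NIC SRAM: the 64-bit host
 * DMA address of the ring, the length/flags word, and (only on chips older
 * than the 5705) the NIC-side ring address.
 */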
7067static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7068 dma_addr_t mapping, u32 maxlen_flags,
7069 u32 nic_addr)
7070{
7071 tg3_write_mem(tp,
7072 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7073 ((u64) mapping >> 32));
7074 tg3_write_mem(tp,
7075 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7076 ((u64) mapping & 0xffffffff));
7077 tg3_write_mem(tp,
7078 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7079 maxlen_flags);
7080
7081 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7082 tg3_write_mem(tp,
7083 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7084 nic_addr);
7085}
7086
7087static void __tg3_set_rx_mode(struct net_device *);
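/* Program the host coalescing engine from the ethtool_coalesce parameters.
 * The IRQ-context tick registers and the statistics block tick register
 * only exist on chips older than the 5705, and the statistics ticks are
 * forced to zero while the link is down.
 */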
Michael Chand244c892005-07-05 14:42:33 -07007088static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07007089{
7090 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7091 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7092 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7093 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7094 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7095 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7096 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7097 }
7098 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7099 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7100 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7101 u32 val = ec->stats_block_coalesce_usecs;
7102
7103 if (!netif_carrier_ok(tp->dev))
7104 val = 0;
7105
7106 tw32(HOSTCC_STAT_COAL_TICKS, val);
7107 }
7108}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007109
7110/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007111static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007112{
7113 u32 val, rdmac_mode;
7114 int i, err, limit;
7115
7116 tg3_disable_ints(tp);
7117
7118 tg3_stop_fw(tp);
7119
7120 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7121
7122 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007123 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007124 }
7125
Matt Carlsondd477002008-05-25 23:45:58 -07007126 if (reset_phy &&
7127 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007128 tg3_phy_reset(tp);
7129
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130 err = tg3_chip_reset(tp);
7131 if (err)
7132 return err;
7133
7134 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7135
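	/* On 5784 A-step chips, take the CPMU out of its link-aware and
	 * link-idle power modes and pin the 10Mb, link-aware, and host
	 * access MAC clocks at 6.25MHz.
	 */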
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007136 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007137 val = tr32(TG3_CPMU_CTRL);
7138 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7139 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007140
7141 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7142 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7143 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7144 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7145
7146 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7147 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7148 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7149 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7150
7151 val = tr32(TG3_CPMU_HST_ACC);
7152 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7153 val |= CPMU_HST_ACC_MACCLK_6_25;
7154 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007155 }
7156
Linus Torvalds1da177e2005-04-16 15:20:36 -07007157 /* This works around an issue with Athlon chipsets on
7158 * B3 tigon3 silicon. This bit has no effect on any
7159 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007160 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007161 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007162 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7163 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7164 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7165 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007167
7168 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7169 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7170 val = tr32(TG3PCI_PCISTATE);
7171 val |= PCISTATE_RETRY_SAME_DMA;
7172 tw32(TG3PCI_PCISTATE, val);
7173 }
7174
Matt Carlson0d3031d2007-10-10 18:02:43 -07007175 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7176 /* Allow reads and writes to the
7177 * APE register and memory space.
7178 */
7179 val = tr32(TG3PCI_PCISTATE);
7180 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7181 PCISTATE_ALLOW_APE_SHMEM_WR;
7182 tw32(TG3PCI_PCISTATE, val);
7183 }
7184
Linus Torvalds1da177e2005-04-16 15:20:36 -07007185 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7186 /* Enable some hw fixes. */
7187 val = tr32(TG3PCI_MSI_DATA);
7188 val |= (1 << 26) | (1 << 28) | (1 << 29);
7189 tw32(TG3PCI_MSI_DATA, val);
7190 }
7191
7192 /* Descriptor ring init may make accesses to the
7193 * NIC SRAM area to setup the TX descriptors, so we
7194 * can only do this after the hardware has been
7195 * successfully reset.
7196 */
Michael Chan32d8c572006-07-25 16:38:29 -07007197 err = tg3_init_rings(tp);
7198 if (err)
7199 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007200
Matt Carlson9936bcf2007-10-10 18:03:07 -07007201 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlsonfcb389d2008-11-03 16:55:44 -08007202 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007203 /* This value is determined during the probe time DMA
7204 * engine test, tg3_test_dma.
7205 */
7206 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7207 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208
7209 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7210 GRC_MODE_4X_NIC_SEND_RINGS |
7211 GRC_MODE_NO_TX_PHDR_CSUM |
7212 GRC_MODE_NO_RX_PHDR_CSUM);
7213 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007214
7215 /* Pseudo-header checksum is done by hardware logic and not
7216	 * the offload processors, so make the chip do the pseudo-
7217 * header checksums on receive. For transmit it is more
7218 * convenient to do the pseudo-header checksum in software
7219 * as Linux does that on transmit for us in all cases.
7220 */
7221 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007222
7223 tw32(GRC_MODE,
7224 tp->grc_mode |
7225 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7226
7227	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
7228 val = tr32(GRC_MISC_CFG);
7229 val &= ~0xff;
7230 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7231 tw32(GRC_MISC_CFG, val);
7232
7233 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007234 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007235 /* Do nothing. */
7236 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7237 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7238 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7239 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7240 else
7241 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7242 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7243 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7244 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007245 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7246 int fw_len;
7247
7248 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7249 TG3_TSO5_FW_RODATA_LEN +
7250 TG3_TSO5_FW_DATA_LEN +
7251 TG3_TSO5_FW_SBSS_LEN +
7252 TG3_TSO5_FW_BSS_LEN);
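		/* Round the firmware footprint up to a 128-byte boundary
		 * before carving it out of the front of the 5705 MBUF pool.
		 */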
7253 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7254 tw32(BUFMGR_MB_POOL_ADDR,
7255 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7256 tw32(BUFMGR_MB_POOL_SIZE,
7257 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7258 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007259
Michael Chan0f893dc2005-07-25 12:30:38 -07007260 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007261 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7262 tp->bufmgr_config.mbuf_read_dma_low_water);
7263 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7264 tp->bufmgr_config.mbuf_mac_rx_low_water);
7265 tw32(BUFMGR_MB_HIGH_WATER,
7266 tp->bufmgr_config.mbuf_high_water);
7267 } else {
7268 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7269 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7270 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7271 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7272 tw32(BUFMGR_MB_HIGH_WATER,
7273 tp->bufmgr_config.mbuf_high_water_jumbo);
7274 }
7275 tw32(BUFMGR_DMA_LOW_WATER,
7276 tp->bufmgr_config.dma_low_water);
7277 tw32(BUFMGR_DMA_HIGH_WATER,
7278 tp->bufmgr_config.dma_high_water);
7279
7280 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7281 for (i = 0; i < 2000; i++) {
7282 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7283 break;
7284 udelay(10);
7285 }
7286 if (i >= 2000) {
7287 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7288 tp->dev->name);
7289 return -ENODEV;
7290 }
7291
7292 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007293 val = tp->rx_pending / 8;
7294 if (val == 0)
7295 val = 1;
7296 else if (val > tp->rx_std_max_post)
7297 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007298 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7299 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7300 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7301
7302 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7303 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7304 }
Michael Chanf92905d2006-06-29 20:14:29 -07007305
7306 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007307
7308 /* Initialize TG3_BDINFO's at:
7309 * RCVDBDI_STD_BD: standard eth size rx ring
7310 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7311 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7312 *
7313 * like so:
7314 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7315 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7316 * ring attribute flags
7317 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7318 *
7319 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7320 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7321 *
7322 * The size of each ring is fixed in the firmware, but the location is
7323 * configurable.
7324 */
7325 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7326 ((u64) tp->rx_std_mapping >> 32));
7327 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7328 ((u64) tp->rx_std_mapping & 0xffffffff));
7329 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7330 NIC_SRAM_RX_BUFFER_DESC);
7331
7332 /* Don't even try to program the JUMBO/MINI buffer descriptor
7333 * configs on 5705.
7334 */
7335 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7336 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7337 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7338 } else {
7339 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7340 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7341
7342 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7343 BDINFO_FLAGS_DISABLED);
7344
7345 /* Setup replenish threshold. */
7346 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7347
Michael Chan0f893dc2005-07-25 12:30:38 -07007348 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007349 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7350 ((u64) tp->rx_jumbo_mapping >> 32));
7351 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7352 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7353 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7354 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7355 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7356 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7357 } else {
7358 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7359 BDINFO_FLAGS_DISABLED);
7360 }
7361
7362 }
7363
7364 /* There is only one send ring on 5705/5750, no need to explicitly
7365 * disable the others.
7366 */
7367 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7368 /* Clear out send RCB ring in SRAM. */
7369 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7370 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7371 BDINFO_FLAGS_DISABLED);
7372 }
7373
7374 tp->tx_prod = 0;
7375 tp->tx_cons = 0;
7376 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7377 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7378
7379 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7380 tp->tx_desc_mapping,
7381 (TG3_TX_RING_SIZE <<
7382 BDINFO_FLAGS_MAXLEN_SHIFT),
7383 NIC_SRAM_TX_BUFFER_DESC);
7384
7385 /* There is only one receive return ring on 5705/5750, no need
7386 * to explicitly disable the others.
7387 */
7388 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7389 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7390 i += TG3_BDINFO_SIZE) {
7391 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7392 BDINFO_FLAGS_DISABLED);
7393 }
7394 }
7395
7396 tp->rx_rcb_ptr = 0;
7397 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7398
7399 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7400 tp->rx_rcb_mapping,
7401 (TG3_RX_RCB_RING_SIZE(tp) <<
7402 BDINFO_FLAGS_MAXLEN_SHIFT),
7403 0);
7404
7405 tp->rx_std_ptr = tp->rx_pending;
7406 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7407 tp->rx_std_ptr);
7408
Michael Chan0f893dc2005-07-25 12:30:38 -07007409 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007410 tp->rx_jumbo_pending : 0;
7411 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7412 tp->rx_jumbo_ptr);
7413
7414 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007415 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416
7417 /* MTU + ethernet header + FCS + optional VLAN tag */
7418 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7419
7420 /* The slot time is changed by tg3_setup_phy if we
7421 * run at gigabit with half duplex.
7422 */
7423 tw32(MAC_TX_LENGTHS,
7424 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7425 (6 << TX_LENGTHS_IPG_SHIFT) |
7426 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7427
7428 /* Receive rules. */
7429 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7430 tw32(RCVLPC_CONFIG, 0x0181);
7431
7432	/* Calculate the RDMAC_MODE setting early; we need it to determine
7433	 * the RCVLPC_STATS_ENABLE mask.
7434 */
7435 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7436 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7437 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7438 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7439 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007440
Matt Carlson57e69832008-05-25 23:48:31 -07007441 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007443 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7444 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7445 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7446
Michael Chan85e94ce2005-04-21 17:05:28 -07007447 /* If statement applies to 5705 and 5750 PCI devices only */
7448 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7449 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7450 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007451 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007452 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007453 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7454 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7455 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7456 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7457 }
7458 }
7459
Michael Chan85e94ce2005-04-21 17:05:28 -07007460 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7461 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7462
Linus Torvalds1da177e2005-04-16 15:20:36 -07007463 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7464 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007465
7466 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007467 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7468 val = tr32(RCVLPC_STATS_ENABLE);
7469 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7470 tw32(RCVLPC_STATS_ENABLE, val);
7471 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7472 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007473 val = tr32(RCVLPC_STATS_ENABLE);
7474 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7475 tw32(RCVLPC_STATS_ENABLE, val);
7476 } else {
7477 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7478 }
7479 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7480 tw32(SNDDATAI_STATSENAB, 0xffffff);
7481 tw32(SNDDATAI_STATSCTRL,
7482 (SNDDATAI_SCTRL_ENABLE |
7483 SNDDATAI_SCTRL_FASTUPD));
7484
7485 /* Setup host coalescing engine. */
7486 tw32(HOSTCC_MODE, 0);
7487 for (i = 0; i < 2000; i++) {
7488 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7489 break;
7490 udelay(10);
7491 }
7492
Michael Chand244c892005-07-05 14:42:33 -07007493 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007494
7495 /* set status block DMA address */
7496 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7497 ((u64) tp->status_mapping >> 32));
7498 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7499 ((u64) tp->status_mapping & 0xffffffff));
7500
7501 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7502 /* Status/statistics block address. See tg3_timer,
7503 * the tg3_periodic_fetch_stats call there, and
7504 * tg3_get_stats to see how this works for 5705/5750 chips.
7505 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007506 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7507 ((u64) tp->stats_mapping >> 32));
7508 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7509 ((u64) tp->stats_mapping & 0xffffffff));
7510 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7511 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7512 }
7513
7514 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7515
7516 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7517 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7518 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7519 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7520
7521 /* Clear statistics/status block in chip, and status block in ram. */
7522 for (i = NIC_SRAM_STATS_BLK;
7523 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7524 i += sizeof(u32)) {
7525 tg3_write_mem(tp, i, 0);
7526 udelay(40);
7527 }
7528 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7529
Michael Chanc94e3942005-09-27 12:12:42 -07007530 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7531 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7532 /* reset to prevent losing 1st rx packet intermittently */
7533 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7534 udelay(10);
7535 }
7536
Matt Carlson3bda1252008-08-15 14:08:22 -07007537 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7538 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7539 else
7540 tp->mac_mode = 0;
7541 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007543 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7544 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7545 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7546 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007547 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7548 udelay(40);
7549
Michael Chan314fba32005-04-21 17:07:04 -07007550 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007551 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007552 * register to preserve the GPIO settings for LOMs. The GPIOs,
7553 * whether used as inputs or outputs, are set by boot code after
7554 * reset.
7555 */
Michael Chan9d26e212006-12-07 00:21:14 -08007556 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007557 u32 gpio_mask;
7558
Michael Chan9d26e212006-12-07 00:21:14 -08007559 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7560 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7561 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007562
7563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7564 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7565 GRC_LCLCTRL_GPIO_OUTPUT3;
7566
Michael Chanaf36e6b2006-03-23 01:28:06 -08007567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7568 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7569
Gary Zambranoaaf84462007-05-05 11:51:45 -07007570 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007571 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7572
7573 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007574 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7575 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7576 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007577 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7579 udelay(100);
7580
Michael Chan09ee9292005-08-09 20:17:00 -07007581 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007582 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007583
7584 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7585 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7586 udelay(40);
7587 }
7588
7589 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7590 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7591 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7592 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7593 WDMAC_MODE_LNGREAD_ENAB);
7594
Michael Chan85e94ce2005-04-21 17:05:28 -07007595 /* If statement applies to 5705 and 5750 PCI devices only */
7596 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7597 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007599		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7600 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7601 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7602 /* nothing */
7603 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7604 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7605 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7606 val |= WDMAC_MODE_RX_ACCEL;
7607 }
7608 }
7609
Michael Chand9ab5ad2006-03-20 22:27:35 -08007610 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007611 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007612 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007613 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007614 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7615 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007616 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007617
Linus Torvalds1da177e2005-04-16 15:20:36 -07007618 tw32_f(WDMAC_MODE, val);
7619 udelay(40);
7620
Matt Carlson9974a352007-10-07 23:27:28 -07007621 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7622 u16 pcix_cmd;
7623
7624 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7625 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007627 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7628 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007629 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007630 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7631 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007632 }
Matt Carlson9974a352007-10-07 23:27:28 -07007633 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7634 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007635 }
7636
7637 tw32_f(RDMAC_MODE, rdmac_mode);
7638 udelay(40);
7639
7640 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7641 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7642 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007643
7644 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7645 tw32(SNDDATAC_MODE,
7646 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7647 else
7648 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7649
Linus Torvalds1da177e2005-04-16 15:20:36 -07007650 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7651 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7652 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7653 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007654 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7655 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007656 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7657 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7658
7659 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7660 err = tg3_load_5701_a0_firmware_fix(tp);
7661 if (err)
7662 return err;
7663 }
7664
Linus Torvalds1da177e2005-04-16 15:20:36 -07007665 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7666 err = tg3_load_tso_firmware(tp);
7667 if (err)
7668 return err;
7669 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007670
7671 tp->tx_mode = TX_MODE_ENABLE;
7672 tw32_f(MAC_TX_MODE, tp->tx_mode);
7673 udelay(100);
7674
7675 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007676 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007680 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7681
Linus Torvalds1da177e2005-04-16 15:20:36 -07007682 tw32_f(MAC_RX_MODE, tp->rx_mode);
7683 udelay(10);
7684
Linus Torvalds1da177e2005-04-16 15:20:36 -07007685 tw32(MAC_LED_CTRL, tp->led_ctrl);
7686
7687 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007688 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007689 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7690 udelay(10);
7691 }
7692 tw32_f(MAC_RX_MODE, tp->rx_mode);
7693 udelay(10);
7694
7695 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7696 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7697 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7698 /* Set drive transmission level to 1.2V */
7699 /* only if the signal pre-emphasis bit is not set */
7700 val = tr32(MAC_SERDES_CFG);
7701 val &= 0xfffff000;
7702 val |= 0x880;
7703 tw32(MAC_SERDES_CFG, val);
7704 }
7705 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7706 tw32(MAC_SERDES_CFG, 0x616000);
7707 }
7708
7709 /* Prevent chip from dropping frames when flow control
7710 * is enabled.
7711 */
7712 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7713
7714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7715 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7716 /* Use hardware link auto-negotiation */
7717 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7718 }
7719
Michael Chand4d2c552006-03-20 17:47:20 -08007720 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7721 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7722 u32 tmp;
7723
7724 tmp = tr32(SERDES_RX_CTRL);
7725 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7726 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7727 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7728 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7729 }
7730
Matt Carlsondd477002008-05-25 23:45:58 -07007731 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7732 if (tp->link_config.phy_is_low_power) {
7733 tp->link_config.phy_is_low_power = 0;
7734 tp->link_config.speed = tp->link_config.orig_speed;
7735 tp->link_config.duplex = tp->link_config.orig_duplex;
7736 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7737 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007738
Matt Carlsondd477002008-05-25 23:45:58 -07007739 err = tg3_setup_phy(tp, 0);
7740 if (err)
7741 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007742
Matt Carlsondd477002008-05-25 23:45:58 -07007743 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7744 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7745 u32 tmp;
7746
7747 /* Clear CRC stats. */
7748 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7749 tg3_writephy(tp, MII_TG3_TEST1,
7750 tmp | MII_TG3_TEST1_CRC_EN);
7751 tg3_readphy(tp, 0x14, &tmp);
7752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007753 }
7754 }
7755
7756 __tg3_set_rx_mode(tp->dev);
7757
7758 /* Initialize receive rules. */
7759 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7760 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7761 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7762 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7763
Michael Chan4cf78e42005-07-25 12:29:19 -07007764 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007765 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007766 limit = 8;
7767 else
7768 limit = 16;
7769 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7770 limit -= 4;
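	/* Each case falls through to the one below it, clearing every
	 * receive rule from (limit - 1) down to rule 4; rules 0 and 1 were
	 * programmed above and rules 2 and 3 are deliberately left alone.
	 */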
7771 switch (limit) {
7772 case 16:
7773 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7774 case 15:
7775 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7776 case 14:
7777 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7778 case 13:
7779 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7780 case 12:
7781 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7782 case 11:
7783 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7784 case 10:
7785 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7786 case 9:
7787 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7788 case 8:
7789 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7790 case 7:
7791 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7792 case 6:
7793 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7794 case 5:
7795 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7796 case 4:
7797 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7798 case 3:
7799 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7800 case 2:
7801 case 1:
7802
7803 default:
7804 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007805 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007806
Matt Carlson9ce768e2007-10-11 19:49:11 -07007807 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7808 /* Write our heartbeat update interval to APE. */
7809 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7810 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007811
Linus Torvalds1da177e2005-04-16 15:20:36 -07007812 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7813
Linus Torvalds1da177e2005-04-16 15:20:36 -07007814 return 0;
7815}
7816
7817/* Called at device open time to get the chip ready for
7818 * packet processing. Invoked with tp->lock held.
7819 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007820static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007821{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007822 tg3_switch_clocks(tp);
7823
7824 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7825
Matt Carlson2f751b62008-08-04 23:17:34 -07007826 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007827}
7828
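/* Accumulate a 32-bit MAC statistics register into a 64-bit (high:low)
 * counter, carrying into the high word when the low word wraps.
 */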
7829#define TG3_STAT_ADD32(PSTAT, REG) \
7830do { u32 __val = tr32(REG); \
7831 (PSTAT)->low += __val; \
7832 if ((PSTAT)->low < __val) \
7833 (PSTAT)->high += 1; \
7834} while (0)
7835
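/* 5705 and later chips do not DMA the statistics block to host memory,
 * so the driver timer calls this once a second to fold the 32-bit MAC
 * counters into tp->hw_stats (see tg3_timer and tg3_get_stats).
 */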
7836static void tg3_periodic_fetch_stats(struct tg3 *tp)
7837{
7838 struct tg3_hw_stats *sp = tp->hw_stats;
7839
7840 if (!netif_carrier_ok(tp->dev))
7841 return;
7842
7843 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7844 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7845 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7846 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7847 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7848 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7849 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7850 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7851 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7852 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7853 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7854 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7855 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7856
7857 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7858 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7859 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7860 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7861 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7862 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7863 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7864 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7865 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7866 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7867 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7868 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7869 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7870 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007871
7872 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7873 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7874 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007875}
7876
7877static void tg3_timer(unsigned long __opaque)
7878{
7879 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007880
Michael Chanf475f162006-03-27 23:20:14 -08007881 if (tp->irq_sync)
7882 goto restart_timer;
7883
David S. Millerf47c11e2005-06-24 20:18:35 -07007884 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007885
David S. Millerfac9b832005-05-18 22:46:34 -07007886 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7887		/* All of this garbage is needed because, when using non-tagged
7888		 * IRQ status, the mailbox/status_block protocol the chip
7889		 * uses with the CPU is race prone.
7890 */
7891 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7892 tw32(GRC_LOCAL_CTRL,
7893 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7894 } else {
7895 tw32(HOSTCC_MODE, tp->coalesce_mode |
7896 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7897 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007898
David S. Millerfac9b832005-05-18 22:46:34 -07007899 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7900 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007901 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007902 schedule_work(&tp->reset_task);
7903 return;
7904 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007905 }
7906
Linus Torvalds1da177e2005-04-16 15:20:36 -07007907 /* This part only runs once per second. */
7908 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007909 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7910 tg3_periodic_fetch_stats(tp);
7911
Linus Torvalds1da177e2005-04-16 15:20:36 -07007912 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7913 u32 mac_stat;
7914 int phy_event;
7915
7916 mac_stat = tr32(MAC_STATUS);
7917
7918 phy_event = 0;
7919 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7920 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7921 phy_event = 1;
7922 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7923 phy_event = 1;
7924
7925 if (phy_event)
7926 tg3_setup_phy(tp, 0);
7927 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7928 u32 mac_stat = tr32(MAC_STATUS);
7929 int need_setup = 0;
7930
7931 if (netif_carrier_ok(tp->dev) &&
7932 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7933 need_setup = 1;
7934 }
7935			if (!netif_carrier_ok(tp->dev) &&
7936 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7937 MAC_STATUS_SIGNAL_DET))) {
7938 need_setup = 1;
7939 }
7940 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007941 if (!tp->serdes_counter) {
7942 tw32_f(MAC_MODE,
7943 (tp->mac_mode &
7944 ~MAC_MODE_PORT_MODE_MASK));
7945 udelay(40);
7946 tw32_f(MAC_MODE, tp->mac_mode);
7947 udelay(40);
7948 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007949 tg3_setup_phy(tp, 0);
7950 }
Michael Chan747e8f82005-07-25 12:33:22 -07007951 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7952 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953
7954 tp->timer_counter = tp->timer_multiplier;
7955 }
7956
Michael Chan130b8e42006-09-27 16:00:40 -07007957 /* Heartbeat is only sent once every 2 seconds.
7958 *
7959 * The heartbeat is to tell the ASF firmware that the host
7960 * driver is still alive. In the event that the OS crashes,
7961 * ASF needs to reset the hardware to free up the FIFO space
7962 * that may be filled with rx packets destined for the host.
7963 * If the FIFO is full, ASF will no longer function properly.
7964 *
7965 * Unintended resets have been reported on real time kernels
7966 * where the timer doesn't run on time. Netpoll will also have
7967	 * the same problem.
7968 *
7969 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7970 * to check the ring condition when the heartbeat is expiring
7971 * before doing the reset. This will prevent most unintended
7972 * resets.
7973 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007974 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007975 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7976 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007977 tg3_wait_for_event_ack(tp);
7978
Michael Chanbbadf502006-04-06 21:46:34 -07007979 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007980 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007981 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007982 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007983 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007984
7985 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007986 }
7987 tp->asf_counter = tp->asf_multiplier;
7988 }
7989
David S. Millerf47c11e2005-06-24 20:18:35 -07007990 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007991
Michael Chanf475f162006-03-27 23:20:14 -08007992restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007993 tp->timer.expires = jiffies + tp->timer_offset;
7994 add_timer(&tp->timer);
7995}
7996
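/* Request the device interrupt with the handler that matches the current
 * mode: MSI (optionally one-shot) or shared INTx, tagged or non-tagged
 * status block.
 */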
Adrian Bunk81789ef2006-03-20 23:00:14 -08007997static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007998{
David Howells7d12e782006-10-05 14:55:46 +01007999 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008000 unsigned long flags;
8001 struct net_device *dev = tp->dev;
8002
8003 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8004 fn = tg3_msi;
8005 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8006 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008007 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008008 } else {
8009 fn = tg3_interrupt;
8010 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8011 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008012 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008013 }
8014 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8015}
8016
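/* Verify that interrupt delivery works: install the test ISR, force a
 * "coalesce now" interrupt, and poll the interrupt mailbox (or the masked
 * PCI interrupt bit for MSI) for up to roughly 50ms.
 */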
Michael Chan79381092005-04-21 17:13:59 -07008017static int tg3_test_interrupt(struct tg3 *tp)
8018{
8019 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07008020 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07008021
Michael Chand4bc3922005-05-29 14:59:20 -07008022 if (!netif_running(dev))
8023 return -ENODEV;
8024
Michael Chan79381092005-04-21 17:13:59 -07008025 tg3_disable_ints(tp);
8026
8027 free_irq(tp->pdev->irq, dev);
8028
8029 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008030 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07008031 if (err)
8032 return err;
8033
Michael Chan38f38432005-09-05 17:53:32 -07008034 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07008035 tg3_enable_ints(tp);
8036
8037 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8038 HOSTCC_MODE_NOW);
8039
8040 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07008041 u32 int_mbox, misc_host_ctrl;
8042
Michael Chan09ee9292005-08-09 20:17:00 -07008043 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8044 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07008045 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8046
8047 if ((int_mbox != 0) ||
8048 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8049 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07008050 break;
Michael Chanb16250e2006-09-27 16:10:14 -07008051 }
8052
Michael Chan79381092005-04-21 17:13:59 -07008053 msleep(10);
8054 }
8055
8056 tg3_disable_ints(tp);
8057
8058 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008059
Michael Chanfcfa0a32006-03-20 22:28:41 -08008060 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008061
8062 if (err)
8063 return err;
8064
Michael Chanb16250e2006-09-27 16:10:14 -07008065 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07008066 return 0;
8067
8068 return -EIO;
8069}
8070
8071/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
8072 * mode is successfully restored.
8073 */
8074static int tg3_test_msi(struct tg3 *tp)
8075{
8076 struct net_device *dev = tp->dev;
8077 int err;
8078 u16 pci_cmd;
8079
8080 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8081 return 0;
8082
8083 /* Turn off SERR reporting in case MSI terminates with Master
8084 * Abort.
8085 */
8086 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8087 pci_write_config_word(tp->pdev, PCI_COMMAND,
8088 pci_cmd & ~PCI_COMMAND_SERR);
8089
8090 err = tg3_test_interrupt(tp);
8091
8092 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8093
8094 if (!err)
8095 return 0;
8096
8097 /* other failures */
8098 if (err != -EIO)
8099 return err;
8100
8101 /* MSI test failed, go back to INTx mode */
8102 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8103 "switching to INTx mode. Please report this failure to "
8104 "the PCI maintainer and include system chipset information.\n",
8105 tp->dev->name);
8106
8107 free_irq(tp->pdev->irq, dev);
8108 pci_disable_msi(tp->pdev);
8109
8110 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8111
Michael Chanfcfa0a32006-03-20 22:28:41 -08008112 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008113 if (err)
8114 return err;
8115
8116 /* Need to reset the chip because the MSI cycle may have terminated
8117 * with Master Abort.
8118 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008119 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008120
Michael Chan944d9802005-05-29 14:57:48 -07008121 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008122 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008123
David S. Millerf47c11e2005-06-24 20:18:35 -07008124 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008125
8126 if (err)
8127 free_irq(tp->pdev->irq, dev);
8128
8129 return err;
8130}
8131
Linus Torvalds1da177e2005-04-16 15:20:36 -07008132static int tg3_open(struct net_device *dev)
8133{
8134 struct tg3 *tp = netdev_priv(dev);
8135 int err;
8136
Michael Chanc49a1562006-12-17 17:07:29 -08008137 netif_carrier_off(tp->dev);
8138
Michael Chanbc1c7562006-03-20 17:48:03 -08008139 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008140 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008141 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008142
8143 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008144
Linus Torvalds1da177e2005-04-16 15:20:36 -07008145 tg3_disable_ints(tp);
8146 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8147
David S. Millerf47c11e2005-06-24 20:18:35 -07008148 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008149
8150 /* The placement of this call is tied
8151 * to the setup and use of Host TX descriptors.
8152 */
8153 err = tg3_alloc_consistent(tp);
8154 if (err)
8155 return err;
8156
Michael Chan7544b092007-05-05 13:08:32 -07008157 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008158 /* All MSI supporting chips should support tagged
8159		 * status.  Warn if that is not the case.
8160 */
8161 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8162 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8163 "Not using MSI.\n", tp->dev->name);
8164 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008165 u32 msi_mode;
8166
8167 msi_mode = tr32(MSGINT_MODE);
8168 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8169 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8170 }
8171 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008172 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008173
8174 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008175 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8176 pci_disable_msi(tp->pdev);
8177 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008179 tg3_free_consistent(tp);
8180 return err;
8181 }
8182
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008183 napi_enable(&tp->napi);
8184
David S. Millerf47c11e2005-06-24 20:18:35 -07008185 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008186
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008187 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008188 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008189 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008190 tg3_free_rings(tp);
8191 } else {
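		/* With tagged status the timer only needs to run once a
		 * second; the racy non-tagged protocol is polled ten times
		 * a second instead.
		 */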
David S. Millerfac9b832005-05-18 22:46:34 -07008192 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8193 tp->timer_offset = HZ;
8194 else
8195 tp->timer_offset = HZ / 10;
8196
8197 BUG_ON(tp->timer_offset > HZ);
8198 tp->timer_counter = tp->timer_multiplier =
8199 (HZ / tp->timer_offset);
8200 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008201 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008202
8203 init_timer(&tp->timer);
8204 tp->timer.expires = jiffies + tp->timer_offset;
8205 tp->timer.data = (unsigned long) tp;
8206 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008207 }
8208
David S. Millerf47c11e2005-06-24 20:18:35 -07008209 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008210
8211 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008212 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008213 free_irq(tp->pdev->irq, dev);
8214 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8215 pci_disable_msi(tp->pdev);
8216 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008218 tg3_free_consistent(tp);
8219 return err;
8220 }
8221
Michael Chan79381092005-04-21 17:13:59 -07008222 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8223 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008224
Michael Chan79381092005-04-21 17:13:59 -07008225 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008226 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008227
8228 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8229 pci_disable_msi(tp->pdev);
8230 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8231 }
Michael Chan944d9802005-05-29 14:57:48 -07008232 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008233 tg3_free_rings(tp);
8234 tg3_free_consistent(tp);
8235
David S. Millerf47c11e2005-06-24 20:18:35 -07008236 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008237
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008238 napi_disable(&tp->napi);
8239
Michael Chan79381092005-04-21 17:13:59 -07008240 return err;
8241 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008242
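		/* If this chip uses one-shot MSI, tell the PCIe transaction
		 * block to generate MSIs in one-shot mode as well.
		 */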
8243 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8244 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008245 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008246
Michael Chanb5d37722006-09-27 16:06:21 -07008247 tw32(PCIE_TRANSACTION_CFG,
8248 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008249 }
8250 }
Michael Chan79381092005-04-21 17:13:59 -07008251 }
8252
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008253 tg3_phy_start(tp);
8254
David S. Millerf47c11e2005-06-24 20:18:35 -07008255 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008256
Michael Chan79381092005-04-21 17:13:59 -07008257 add_timer(&tp->timer);
8258 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008259 tg3_enable_ints(tp);
8260
David S. Millerf47c11e2005-06-24 20:18:35 -07008261 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008262
8263 netif_start_queue(dev);
8264
8265 return 0;
8266}
8267
8268#if 0
8269/*static*/ void tg3_dump_state(struct tg3 *tp)
8270{
8271 u32 val32, val32_2, val32_3, val32_4, val32_5;
8272 u16 val16;
8273 int i;
8274
8275 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8276 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8277 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8278 val16, val32);
8279
8280 /* MAC block */
8281 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8282 tr32(MAC_MODE), tr32(MAC_STATUS));
8283 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8284 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8285 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8286 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8287 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8288 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8289
8290 /* Send data initiator control block */
8291 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8292 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8293 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8294 tr32(SNDDATAI_STATSCTRL));
8295
8296 /* Send data completion control block */
8297 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8298
8299 /* Send BD ring selector block */
8300 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8301 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8302
8303 /* Send BD initiator control block */
8304 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8305 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8306
8307 /* Send BD completion control block */
8308 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8309
8310 /* Receive list placement control block */
8311 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8312 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8313 printk(" RCVLPC_STATSCTRL[%08x]\n",
8314 tr32(RCVLPC_STATSCTRL));
8315
8316 /* Receive data and receive BD initiator control block */
8317 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8318 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8319
8320 /* Receive data completion control block */
8321 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8322 tr32(RCVDCC_MODE));
8323
8324 /* Receive BD initiator control block */
8325 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8326 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8327
8328 /* Receive BD completion control block */
8329 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8330 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8331
8332 /* Receive list selector control block */
8333 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8334 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8335
8336 /* Mbuf cluster free block */
8337 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8338 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8339
8340 /* Host coalescing control block */
8341 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8342 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8343 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8344 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8345 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8346 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8347 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8348 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8349 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8350 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8351 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8352 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8353
8354 /* Memory arbiter control block */
8355 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8356 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8357
8358 /* Buffer manager control block */
8359 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8360 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8361 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8362 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8363 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8364 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8365 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8366 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8367
8368 /* Read DMA control block */
8369 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8370 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8371
8372 /* Write DMA control block */
8373 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8374 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8375
8376 /* DMA completion block */
8377 printk("DEBUG: DMAC_MODE[%08x]\n",
8378 tr32(DMAC_MODE));
8379
8380 /* GRC block */
8381 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8382 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8383 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8384 tr32(GRC_LOCAL_CTRL));
8385
8386 /* TG3_BDINFOs */
8387 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8388 tr32(RCVDBDI_JUMBO_BD + 0x0),
8389 tr32(RCVDBDI_JUMBO_BD + 0x4),
8390 tr32(RCVDBDI_JUMBO_BD + 0x8),
8391 tr32(RCVDBDI_JUMBO_BD + 0xc));
8392 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8393 tr32(RCVDBDI_STD_BD + 0x0),
8394 tr32(RCVDBDI_STD_BD + 0x4),
8395 tr32(RCVDBDI_STD_BD + 0x8),
8396 tr32(RCVDBDI_STD_BD + 0xc));
8397 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8398 tr32(RCVDBDI_MINI_BD + 0x0),
8399 tr32(RCVDBDI_MINI_BD + 0x4),
8400 tr32(RCVDBDI_MINI_BD + 0x8),
8401 tr32(RCVDBDI_MINI_BD + 0xc));
8402
8403 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8404 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8405 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8406 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8407 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8408 val32, val32_2, val32_3, val32_4);
8409
8410 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8411 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8412 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8413 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8414 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8415 val32, val32_2, val32_3, val32_4);
8416
8417 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8418 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8419 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8420 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8421 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8422 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8423 val32, val32_2, val32_3, val32_4, val32_5);
8424
8425 /* SW status block */
8426 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8427 tp->hw_status->status,
8428 tp->hw_status->status_tag,
8429 tp->hw_status->rx_jumbo_consumer,
8430 tp->hw_status->rx_consumer,
8431 tp->hw_status->rx_mini_consumer,
8432 tp->hw_status->idx[0].rx_producer,
8433 tp->hw_status->idx[0].tx_consumer);
8434
8435 /* SW statistics block */
8436 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8437 ((u32 *)tp->hw_stats)[0],
8438 ((u32 *)tp->hw_stats)[1],
8439 ((u32 *)tp->hw_stats)[2],
8440 ((u32 *)tp->hw_stats)[3]);
8441
8442 /* Mailboxes */
8443 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008444 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8445 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8446 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8447 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008448
8449 /* NIC side send descriptors. */
8450 for (i = 0; i < 6; i++) {
8451 unsigned long txd;
8452
8453 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8454 + (i * sizeof(struct tg3_tx_buffer_desc));
8455 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8456 i,
8457 readl(txd + 0x0), readl(txd + 0x4),
8458 readl(txd + 0x8), readl(txd + 0xc));
8459 }
8460
8461 /* NIC side RX descriptors. */
8462 for (i = 0; i < 6; i++) {
8463 unsigned long rxd;
8464
8465 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8466 + (i * sizeof(struct tg3_rx_buffer_desc));
8467 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8468 i,
8469 readl(rxd + 0x0), readl(rxd + 0x4),
8470 readl(rxd + 0x8), readl(rxd + 0xc));
8471 rxd += (4 * sizeof(u32));
8472 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8473 i,
8474 readl(rxd + 0x0), readl(rxd + 0x4),
8475 readl(rxd + 0x8), readl(rxd + 0xc));
8476 }
8477
8478 for (i = 0; i < 6; i++) {
8479 unsigned long rxd;
8480
8481 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8482 + (i * sizeof(struct tg3_rx_buffer_desc));
8483 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8484 i,
8485 readl(rxd + 0x0), readl(rxd + 0x4),
8486 readl(rxd + 0x8), readl(rxd + 0xc));
8487 rxd += (4 * sizeof(u32));
8488 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8489 i,
8490 readl(rxd + 0x0), readl(rxd + 0x4),
8491 readl(rxd + 0x8), readl(rxd + 0xc));
8492 }
8493}
8494#endif
8495
8496static struct net_device_stats *tg3_get_stats(struct net_device *);
8497static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8498
8499static int tg3_close(struct net_device *dev)
8500{
8501 struct tg3 *tp = netdev_priv(dev);
8502
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008503 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008504 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008505
Linus Torvalds1da177e2005-04-16 15:20:36 -07008506 netif_stop_queue(dev);
8507
8508 del_timer_sync(&tp->timer);
8509
David S. Millerf47c11e2005-06-24 20:18:35 -07008510 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008511#if 0
8512 tg3_dump_state(tp);
8513#endif
8514
8515 tg3_disable_ints(tp);
8516
Michael Chan944d9802005-05-29 14:57:48 -07008517 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008518 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008519 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008520
David S. Millerf47c11e2005-06-24 20:18:35 -07008521 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008522
Michael Chan88b06bc2005-04-21 17:13:25 -07008523 free_irq(tp->pdev->irq, dev);
8524 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8525 pci_disable_msi(tp->pdev);
8526 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008528
8529 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8530 sizeof(tp->net_stats_prev));
8531 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8532 sizeof(tp->estats_prev));
8533
8534 tg3_free_consistent(tp);
8535
Michael Chanbc1c7562006-03-20 17:48:03 -08008536 tg3_set_power_state(tp, PCI_D3hot);
8537
8538 netif_carrier_off(tp->dev);
8539
Linus Torvalds1da177e2005-04-16 15:20:36 -07008540 return 0;
8541}
8542
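/* Hardware counters are 64-bit values split into 32-bit high/low words;
 * on 32-bit hosts only the low word fits in an unsigned long, so the
 * high word is dropped.
 */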
8543static inline unsigned long get_stat64(tg3_stat64_t *val)
8544{
8545 unsigned long ret;
8546
8547#if (BITS_PER_LONG == 32)
8548 ret = val->low;
8549#else
8550 ret = ((u64)val->high << 32) | ((u64)val->low);
8551#endif
8552 return ret;
8553}
8554
Stefan Buehler816f8b82008-08-15 14:10:54 -07008555static inline u64 get_estat64(tg3_stat64_t *val)
8556{
8557 return ((u64)val->high << 32) | ((u64)val->low);
8558}
8559
Linus Torvalds1da177e2005-04-16 15:20:36 -07008560static unsigned long calc_crc_errors(struct tg3 *tp)
8561{
8562 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8563
8564 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8565 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8566 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008567 u32 val;
8568
David S. Millerf47c11e2005-06-24 20:18:35 -07008569 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008570 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8571 tg3_writephy(tp, MII_TG3_TEST1,
8572 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008573 tg3_readphy(tp, 0x14, &val);
8574 } else
8575 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008576 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008577
8578 tp->phy_crc_errors += val;
8579
8580 return tp->phy_crc_errors;
8581 }
8582
8583 return get_stat64(&hw_stats->rx_fcs_errors);
8584}
8585
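/* Add the live hardware counter to the snapshot saved in estats_prev at
 * the last close, so the ethtool statistics keep accumulating across
 * close/open cycles.
 */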
8586#define ESTAT_ADD(member) \
8587 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008588 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008589
8590static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8591{
8592 struct tg3_ethtool_stats *estats = &tp->estats;
8593 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8594 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8595
8596 if (!hw_stats)
8597 return old_estats;
8598
8599 ESTAT_ADD(rx_octets);
8600 ESTAT_ADD(rx_fragments);
8601 ESTAT_ADD(rx_ucast_packets);
8602 ESTAT_ADD(rx_mcast_packets);
8603 ESTAT_ADD(rx_bcast_packets);
8604 ESTAT_ADD(rx_fcs_errors);
8605 ESTAT_ADD(rx_align_errors);
8606 ESTAT_ADD(rx_xon_pause_rcvd);
8607 ESTAT_ADD(rx_xoff_pause_rcvd);
8608 ESTAT_ADD(rx_mac_ctrl_rcvd);
8609 ESTAT_ADD(rx_xoff_entered);
8610 ESTAT_ADD(rx_frame_too_long_errors);
8611 ESTAT_ADD(rx_jabbers);
8612 ESTAT_ADD(rx_undersize_packets);
8613 ESTAT_ADD(rx_in_length_errors);
8614 ESTAT_ADD(rx_out_length_errors);
8615 ESTAT_ADD(rx_64_or_less_octet_packets);
8616 ESTAT_ADD(rx_65_to_127_octet_packets);
8617 ESTAT_ADD(rx_128_to_255_octet_packets);
8618 ESTAT_ADD(rx_256_to_511_octet_packets);
8619 ESTAT_ADD(rx_512_to_1023_octet_packets);
8620 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8621 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8622 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8623 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8624 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8625
8626 ESTAT_ADD(tx_octets);
8627 ESTAT_ADD(tx_collisions);
8628 ESTAT_ADD(tx_xon_sent);
8629 ESTAT_ADD(tx_xoff_sent);
8630 ESTAT_ADD(tx_flow_control);
8631 ESTAT_ADD(tx_mac_errors);
8632 ESTAT_ADD(tx_single_collisions);
8633 ESTAT_ADD(tx_mult_collisions);
8634 ESTAT_ADD(tx_deferred);
8635 ESTAT_ADD(tx_excessive_collisions);
8636 ESTAT_ADD(tx_late_collisions);
8637 ESTAT_ADD(tx_collide_2times);
8638 ESTAT_ADD(tx_collide_3times);
8639 ESTAT_ADD(tx_collide_4times);
8640 ESTAT_ADD(tx_collide_5times);
8641 ESTAT_ADD(tx_collide_6times);
8642 ESTAT_ADD(tx_collide_7times);
8643 ESTAT_ADD(tx_collide_8times);
8644 ESTAT_ADD(tx_collide_9times);
8645 ESTAT_ADD(tx_collide_10times);
8646 ESTAT_ADD(tx_collide_11times);
8647 ESTAT_ADD(tx_collide_12times);
8648 ESTAT_ADD(tx_collide_13times);
8649 ESTAT_ADD(tx_collide_14times);
8650 ESTAT_ADD(tx_collide_15times);
8651 ESTAT_ADD(tx_ucast_packets);
8652 ESTAT_ADD(tx_mcast_packets);
8653 ESTAT_ADD(tx_bcast_packets);
8654 ESTAT_ADD(tx_carrier_sense_errors);
8655 ESTAT_ADD(tx_discards);
8656 ESTAT_ADD(tx_errors);
8657
8658 ESTAT_ADD(dma_writeq_full);
8659 ESTAT_ADD(dma_write_prioq_full);
8660 ESTAT_ADD(rxbds_empty);
8661 ESTAT_ADD(rx_discards);
8662 ESTAT_ADD(rx_errors);
8663 ESTAT_ADD(rx_threshold_hit);
8664
8665 ESTAT_ADD(dma_readq_full);
8666 ESTAT_ADD(dma_read_prioq_full);
8667 ESTAT_ADD(tx_comp_queue_full);
8668
8669 ESTAT_ADD(ring_set_send_prod_index);
8670 ESTAT_ADD(ring_status_update);
8671 ESTAT_ADD(nic_irqs);
8672 ESTAT_ADD(nic_avoided_irqs);
8673 ESTAT_ADD(nic_tx_threshold_hit);
8674
8675 return estats;
8676}
8677
8678static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8679{
8680 struct tg3 *tp = netdev_priv(dev);
8681 struct net_device_stats *stats = &tp->net_stats;
8682 struct net_device_stats *old_stats = &tp->net_stats_prev;
8683 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8684
8685 if (!hw_stats)
8686 return old_stats;
8687
8688 stats->rx_packets = old_stats->rx_packets +
8689 get_stat64(&hw_stats->rx_ucast_packets) +
8690 get_stat64(&hw_stats->rx_mcast_packets) +
8691 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008692
Linus Torvalds1da177e2005-04-16 15:20:36 -07008693 stats->tx_packets = old_stats->tx_packets +
8694 get_stat64(&hw_stats->tx_ucast_packets) +
8695 get_stat64(&hw_stats->tx_mcast_packets) +
8696 get_stat64(&hw_stats->tx_bcast_packets);
8697
8698 stats->rx_bytes = old_stats->rx_bytes +
8699 get_stat64(&hw_stats->rx_octets);
8700 stats->tx_bytes = old_stats->tx_bytes +
8701 get_stat64(&hw_stats->tx_octets);
8702
8703 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008704 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008705 stats->tx_errors = old_stats->tx_errors +
8706 get_stat64(&hw_stats->tx_errors) +
8707 get_stat64(&hw_stats->tx_mac_errors) +
8708 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8709 get_stat64(&hw_stats->tx_discards);
8710
8711 stats->multicast = old_stats->multicast +
8712 get_stat64(&hw_stats->rx_mcast_packets);
8713 stats->collisions = old_stats->collisions +
8714 get_stat64(&hw_stats->tx_collisions);
8715
8716 stats->rx_length_errors = old_stats->rx_length_errors +
8717 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8718 get_stat64(&hw_stats->rx_undersize_packets);
8719
8720 stats->rx_over_errors = old_stats->rx_over_errors +
8721 get_stat64(&hw_stats->rxbds_empty);
8722 stats->rx_frame_errors = old_stats->rx_frame_errors +
8723 get_stat64(&hw_stats->rx_align_errors);
8724 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8725 get_stat64(&hw_stats->tx_discards);
8726 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8727 get_stat64(&hw_stats->tx_carrier_sense_errors);
8728
8729 stats->rx_crc_errors = old_stats->rx_crc_errors +
8730 calc_crc_errors(tp);
8731
John W. Linville4f63b872005-09-12 14:43:18 -07008732 stats->rx_missed_errors = old_stats->rx_missed_errors +
8733 get_stat64(&hw_stats->rx_discards);
8734
Linus Torvalds1da177e2005-04-16 15:20:36 -07008735 return stats;
8736}
8737
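/* Bit-serial CRC-32 (IEEE 802.3 reflected polynomial 0xedb88320), used
 * below to hash multicast addresses for the MAC hash filter.
 */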
8738static inline u32 calc_crc(unsigned char *buf, int len)
8739{
8740 u32 reg;
8741 u32 tmp;
8742 int j, k;
8743
8744 reg = 0xffffffff;
8745
8746 for (j = 0; j < len; j++) {
8747 reg ^= buf[j];
8748
8749 for (k = 0; k < 8; k++) {
8750 tmp = reg & 0x01;
8751
8752 reg >>= 1;
8753
8754 if (tmp) {
8755 reg ^= 0xedb88320;
8756 }
8757 }
8758 }
8759
8760 return ~reg;
8761}
8762
8763static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8764{
8765 /* accept or reject all multicast frames */
8766 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8767 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8768 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8769 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8770}
8771
8772static void __tg3_set_rx_mode(struct net_device *dev)
8773{
8774 struct tg3 *tp = netdev_priv(dev);
8775 u32 rx_mode;
8776
8777 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8778 RX_MODE_KEEP_VLAN_TAG);
8779
8780 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8781 * flag clear.
8782 */
8783#if TG3_VLAN_TAG_USED
8784 if (!tp->vlgrp &&
8785 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8786 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8787#else
 8788	/* By definition, VLAN is always disabled in
 8789	 * this case.
 8790	 */
8791 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8792 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8793#endif
8794
8795 if (dev->flags & IFF_PROMISC) {
8796 /* Promiscuous mode. */
8797 rx_mode |= RX_MODE_PROMISC;
8798 } else if (dev->flags & IFF_ALLMULTI) {
8799 /* Accept all multicast. */
8800 tg3_set_multi (tp, 1);
8801 } else if (dev->mc_count < 1) {
8802 /* Reject all multicast. */
8803 tg3_set_multi (tp, 0);
8804 } else {
8805 /* Accept one or more multicast(s). */
8806 struct dev_mc_list *mclist;
8807 unsigned int i;
8808 u32 mc_filter[4] = { 0, };
8809 u32 regidx;
8810 u32 bit;
8811 u32 crc;
8812
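		/* Hash each address with CRC-32 and use the complement of the
		 * low 7 bits as the filter index: bits 6:5 select one of the
		 * four MAC_HASH registers, bits 4:0 the bit within it.
		 */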
8813 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8814 i++, mclist = mclist->next) {
8815
8816 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8817 bit = ~crc & 0x7f;
8818 regidx = (bit & 0x60) >> 5;
8819 bit &= 0x1f;
8820 mc_filter[regidx] |= (1 << bit);
8821 }
8822
8823 tw32(MAC_HASH_REG_0, mc_filter[0]);
8824 tw32(MAC_HASH_REG_1, mc_filter[1]);
8825 tw32(MAC_HASH_REG_2, mc_filter[2]);
8826 tw32(MAC_HASH_REG_3, mc_filter[3]);
8827 }
8828
8829 if (rx_mode != tp->rx_mode) {
8830 tp->rx_mode = rx_mode;
8831 tw32_f(MAC_RX_MODE, rx_mode);
8832 udelay(10);
8833 }
8834}
8835
8836static void tg3_set_rx_mode(struct net_device *dev)
8837{
8838 struct tg3 *tp = netdev_priv(dev);
8839
Michael Chane75f7c92006-03-20 21:33:26 -08008840 if (!netif_running(dev))
8841 return;
8842
David S. Millerf47c11e2005-06-24 20:18:35 -07008843 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008844 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008845 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008846}
8847
8848#define TG3_REGDUMP_LEN (32 * 1024)
8849
8850static int tg3_get_regs_len(struct net_device *dev)
8851{
8852 return TG3_REGDUMP_LEN;
8853}
8854
8855static void tg3_get_regs(struct net_device *dev,
8856 struct ethtool_regs *regs, void *_p)
8857{
8858 u32 *p = _p;
8859 struct tg3 *tp = netdev_priv(dev);
8860 u8 *orig_p = _p;
8861 int i;
8862
8863 regs->version = 0;
8864
8865 memset(p, 0, TG3_REGDUMP_LEN);
8866
Michael Chanbc1c7562006-03-20 17:48:03 -08008867 if (tp->link_config.phy_is_low_power)
8868 return;
8869
David S. Millerf47c11e2005-06-24 20:18:35 -07008870 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008871
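/* Each GET_REG32 helper repositions the output pointer to the register's
 * own offset within the dump buffer, so the 32 KB dump mirrors the chip's
 * register layout and any skipped ranges stay zeroed (the buffer was
 * memset above).
 */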
8872#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8873#define GET_REG32_LOOP(base,len) \
8874do { p = (u32 *)(orig_p + (base)); \
8875 for (i = 0; i < len; i += 4) \
8876 __GET_REG32((base) + i); \
8877} while (0)
8878#define GET_REG32_1(reg) \
8879do { p = (u32 *)(orig_p + (reg)); \
8880 __GET_REG32((reg)); \
8881} while (0)
8882
8883 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8884 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8885 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8886 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8887 GET_REG32_1(SNDDATAC_MODE);
8888 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8889 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8890 GET_REG32_1(SNDBDC_MODE);
8891 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8892 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8893 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8894 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8895 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8896 GET_REG32_1(RCVDCC_MODE);
8897 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8898 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8899 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8900 GET_REG32_1(MBFREE_MODE);
8901 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8902 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8903 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8904 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8905 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008906 GET_REG32_1(RX_CPU_MODE);
8907 GET_REG32_1(RX_CPU_STATE);
8908 GET_REG32_1(RX_CPU_PGMCTR);
8909 GET_REG32_1(RX_CPU_HWBKPT);
8910 GET_REG32_1(TX_CPU_MODE);
8911 GET_REG32_1(TX_CPU_STATE);
8912 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008913 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8914 GET_REG32_LOOP(FTQ_RESET, 0x120);
8915 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8916 GET_REG32_1(DMAC_MODE);
8917 GET_REG32_LOOP(GRC_MODE, 0x4c);
8918 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8919 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8920
8921#undef __GET_REG32
8922#undef GET_REG32_LOOP
8923#undef GET_REG32_1
8924
David S. Millerf47c11e2005-06-24 20:18:35 -07008925 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008926}
8927
8928static int tg3_get_eeprom_len(struct net_device *dev)
8929{
8930 struct tg3 *tp = netdev_priv(dev);
8931
8932 return tp->nvram_size;
8933}
8934
8935static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008936static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008937static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008938
8939static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8940{
8941 struct tg3 *tp = netdev_priv(dev);
8942 int ret;
8943 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008944 u32 i, offset, len, b_offset, b_count;
8945 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008946
Michael Chanbc1c7562006-03-20 17:48:03 -08008947 if (tp->link_config.phy_is_low_power)
8948 return -EAGAIN;
8949
Linus Torvalds1da177e2005-04-16 15:20:36 -07008950 offset = eeprom->offset;
8951 len = eeprom->len;
8952 eeprom->len = 0;
8953
8954 eeprom->magic = TG3_EEPROM_MAGIC;
8955
8956 if (offset & 3) {
8957 /* adjustments to start on required 4 byte boundary */
8958 b_offset = offset & 3;
8959 b_count = 4 - b_offset;
8960 if (b_count > len) {
8961 /* i.e. offset=1 len=2 */
8962 b_count = len;
8963 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008964 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008965 if (ret)
8966 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008967 memcpy(data, ((char*)&val) + b_offset, b_count);
8968 len -= b_count;
8969 offset += b_count;
8970 eeprom->len += b_count;
8971 }
8972
 8973	/* read bytes up to the last 4 byte boundary */
8974 pd = &data[eeprom->len];
8975 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008976 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008977 if (ret) {
8978 eeprom->len += i;
8979 return ret;
8980 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008981 memcpy(pd + i, &val, 4);
8982 }
8983 eeprom->len += i;
8984
8985 if (len & 3) {
8986 /* read last bytes not ending on 4 byte boundary */
8987 pd = &data[eeprom->len];
8988 b_count = len & 3;
8989 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008990 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008991 if (ret)
8992 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008993 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008994 eeprom->len += b_count;
8995 }
8996 return 0;
8997}
8998
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008999static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009000
9001static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9002{
9003 struct tg3 *tp = netdev_priv(dev);
9004 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08009005 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009006 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08009007 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009008
Michael Chanbc1c7562006-03-20 17:48:03 -08009009 if (tp->link_config.phy_is_low_power)
9010 return -EAGAIN;
9011
Linus Torvalds1da177e2005-04-16 15:20:36 -07009012 if (eeprom->magic != TG3_EEPROM_MAGIC)
9013 return -EINVAL;
9014
9015 offset = eeprom->offset;
9016 len = eeprom->len;
9017
9018 if ((b_offset = (offset & 3))) {
9019 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08009020 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009021 if (ret)
9022 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009023 len += b_offset;
9024 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07009025 if (len < 4)
9026 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009027 }
9028
9029 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07009030 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009031 /* adjustments to end on required 4 byte boundary */
9032 odd_len = 1;
9033 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08009034 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009035 if (ret)
9036 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009037 }
9038
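	/* An unaligned head or tail is handled read-modify-write: stage the
	 * request in a scratch buffer seeded with the bordering words read
	 * above, so untouched NVRAM bytes are preserved.
	 */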
9039 buf = data;
9040 if (b_offset || odd_len) {
9041 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009042 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009043 return -ENOMEM;
9044 if (b_offset)
9045 memcpy(buf, &start, 4);
9046 if (odd_len)
9047 memcpy(buf+len-4, &end, 4);
9048 memcpy(buf + b_offset, data, eeprom->len);
9049 }
9050
9051 ret = tg3_nvram_write_block(tp, offset, len, buf);
9052
9053 if (buf != data)
9054 kfree(buf);
9055
9056 return ret;
9057}
9058
9059static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9060{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009061 struct tg3 *tp = netdev_priv(dev);
9062
9063 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9064 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9065 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009066 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009067 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009068
Linus Torvalds1da177e2005-04-16 15:20:36 -07009069 cmd->supported = (SUPPORTED_Autoneg);
9070
9071 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9072 cmd->supported |= (SUPPORTED_1000baseT_Half |
9073 SUPPORTED_1000baseT_Full);
9074
Karsten Keilef348142006-05-12 12:49:08 -07009075 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009076 cmd->supported |= (SUPPORTED_100baseT_Half |
9077 SUPPORTED_100baseT_Full |
9078 SUPPORTED_10baseT_Half |
9079 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08009080 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07009081 cmd->port = PORT_TP;
9082 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009083 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07009084 cmd->port = PORT_FIBRE;
9085 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009086
Linus Torvalds1da177e2005-04-16 15:20:36 -07009087 cmd->advertising = tp->link_config.advertising;
9088 if (netif_running(dev)) {
9089 cmd->speed = tp->link_config.active_speed;
9090 cmd->duplex = tp->link_config.active_duplex;
9091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009092 cmd->phy_address = PHY_ADDR;
9093 cmd->transceiver = 0;
9094 cmd->autoneg = tp->link_config.autoneg;
9095 cmd->maxtxpkt = 0;
9096 cmd->maxrxpkt = 0;
9097 return 0;
9098}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009099
Linus Torvalds1da177e2005-04-16 15:20:36 -07009100static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9101{
9102 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009103
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009104 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9105 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9106 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009107 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009108 }
9109
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009110 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009111		/* These are the only advertisement bits allowed. */
9112 if (cmd->autoneg == AUTONEG_ENABLE &&
9113 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9114 ADVERTISED_1000baseT_Full |
9115 ADVERTISED_Autoneg |
9116 ADVERTISED_FIBRE)))
9117 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009118 /* Fiber can only do SPEED_1000. */
9119 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9120 (cmd->speed != SPEED_1000))
9121 return -EINVAL;
9122 /* Copper cannot force SPEED_1000. */
9123 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9124 (cmd->speed == SPEED_1000))
9125 return -EINVAL;
9126 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009127 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009128 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009129
David S. Millerf47c11e2005-06-24 20:18:35 -07009130 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009131
9132 tp->link_config.autoneg = cmd->autoneg;
9133 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009134 tp->link_config.advertising = (cmd->advertising |
9135 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009136 tp->link_config.speed = SPEED_INVALID;
9137 tp->link_config.duplex = DUPLEX_INVALID;
9138 } else {
9139 tp->link_config.advertising = 0;
9140 tp->link_config.speed = cmd->speed;
9141 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009142 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009143
Michael Chan24fcad62006-12-17 17:06:46 -08009144 tp->link_config.orig_speed = tp->link_config.speed;
9145 tp->link_config.orig_duplex = tp->link_config.duplex;
9146 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9147
Linus Torvalds1da177e2005-04-16 15:20:36 -07009148 if (netif_running(dev))
9149 tg3_setup_phy(tp, 1);
9150
David S. Millerf47c11e2005-06-24 20:18:35 -07009151 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009152
Linus Torvalds1da177e2005-04-16 15:20:36 -07009153 return 0;
9154}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009155
Linus Torvalds1da177e2005-04-16 15:20:36 -07009156static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9157{
9158 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009159
Linus Torvalds1da177e2005-04-16 15:20:36 -07009160 strcpy(info->driver, DRV_MODULE_NAME);
9161 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009162 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009163 strcpy(info->bus_info, pci_name(tp->pdev));
9164}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009165
Linus Torvalds1da177e2005-04-16 15:20:36 -07009166static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9167{
9168 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009169
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009170 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9171 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009172 wol->supported = WAKE_MAGIC;
9173 else
9174 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009175 wol->wolopts = 0;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08009176 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9177 device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009178 wol->wolopts = WAKE_MAGIC;
9179 memset(&wol->sopass, 0, sizeof(wol->sopass));
9180}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009181
Linus Torvalds1da177e2005-04-16 15:20:36 -07009182static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9183{
9184 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009185 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009186
Linus Torvalds1da177e2005-04-16 15:20:36 -07009187 if (wol->wolopts & ~WAKE_MAGIC)
9188 return -EINVAL;
9189 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009190 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009191 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009192
David S. Millerf47c11e2005-06-24 20:18:35 -07009193 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009194 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009195 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009196 device_set_wakeup_enable(dp, true);
9197 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009198 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009199 device_set_wakeup_enable(dp, false);
9200 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009201 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009202
Linus Torvalds1da177e2005-04-16 15:20:36 -07009203 return 0;
9204}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009205
Linus Torvalds1da177e2005-04-16 15:20:36 -07009206static u32 tg3_get_msglevel(struct net_device *dev)
9207{
9208 struct tg3 *tp = netdev_priv(dev);
9209 return tp->msg_enable;
9210}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009211
Linus Torvalds1da177e2005-04-16 15:20:36 -07009212static void tg3_set_msglevel(struct net_device *dev, u32 value)
9213{
9214 struct tg3 *tp = netdev_priv(dev);
9215 tp->msg_enable = value;
9216}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009217
Linus Torvalds1da177e2005-04-16 15:20:36 -07009218static int tg3_set_tso(struct net_device *dev, u32 value)
9219{
9220 struct tg3 *tp = netdev_priv(dev);
9221
9222 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9223 if (value)
9224 return -EINVAL;
9225 return 0;
9226 }
Michael Chanb5d37722006-09-27 16:06:21 -07009227 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9228 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009229 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009230 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9232 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9233 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009235 dev->features |= NETIF_F_TSO_ECN;
9236 } else
9237 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009239 return ethtool_op_set_tso(dev, value);
9240}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009241
Linus Torvalds1da177e2005-04-16 15:20:36 -07009242static int tg3_nway_reset(struct net_device *dev)
9243{
9244 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009245 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009246
Linus Torvalds1da177e2005-04-16 15:20:36 -07009247 if (!netif_running(dev))
9248 return -EAGAIN;
9249
Michael Chanc94e3942005-09-27 12:12:42 -07009250 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9251 return -EINVAL;
9252
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009253 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9254 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9255 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009256 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009257 } else {
9258 u32 bmcr;
9259
9260 spin_lock_bh(&tp->lock);
9261 r = -EINVAL;
9262 tg3_readphy(tp, MII_BMCR, &bmcr);
9263 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9264 ((bmcr & BMCR_ANENABLE) ||
9265 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9266 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9267 BMCR_ANENABLE);
9268 r = 0;
9269 }
9270 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009271 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009272
Linus Torvalds1da177e2005-04-16 15:20:36 -07009273 return r;
9274}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009275
Linus Torvalds1da177e2005-04-16 15:20:36 -07009276static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9277{
9278 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009279
Linus Torvalds1da177e2005-04-16 15:20:36 -07009280 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9281 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009282 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9283 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9284 else
9285 ering->rx_jumbo_max_pending = 0;
9286
9287 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009288
9289 ering->rx_pending = tp->rx_pending;
9290 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009291 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9292 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9293 else
9294 ering->rx_jumbo_pending = 0;
9295
Linus Torvalds1da177e2005-04-16 15:20:36 -07009296 ering->tx_pending = tp->tx_pending;
9297}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009298
Linus Torvalds1da177e2005-04-16 15:20:36 -07009299static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9300{
9301 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009302 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009303
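	/* Reject rx/jumbo sizes beyond the ring limits and tx rings with
	 * MAX_SKB_FRAGS or fewer slots (3 * MAX_SKB_FRAGS or fewer on chips
	 * that use the TSO bug workaround).
	 */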
Linus Torvalds1da177e2005-04-16 15:20:36 -07009304 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9305 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009306 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9307 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009308 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009309 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009310 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009311
Michael Chanbbe832c2005-06-24 20:20:04 -07009312 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009313 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009314 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009315 irq_sync = 1;
9316 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009317
Michael Chanbbe832c2005-06-24 20:20:04 -07009318 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009319
Linus Torvalds1da177e2005-04-16 15:20:36 -07009320 tp->rx_pending = ering->rx_pending;
9321
9322 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9323 tp->rx_pending > 63)
9324 tp->rx_pending = 63;
9325 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9326 tp->tx_pending = ering->tx_pending;
9327
9328 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009329 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009330 err = tg3_restart_hw(tp, 1);
9331 if (!err)
9332 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009333 }
9334
David S. Millerf47c11e2005-06-24 20:18:35 -07009335 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009336
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009337 if (irq_sync && !err)
9338 tg3_phy_start(tp);
9339
Michael Chanb9ec6c12006-07-25 16:37:27 -07009340 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009341}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009342
Linus Torvalds1da177e2005-04-16 15:20:36 -07009343static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9344{
9345 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009346
Linus Torvalds1da177e2005-04-16 15:20:36 -07009347 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009348
9349 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9350 epause->rx_pause = 1;
9351 else
9352 epause->rx_pause = 0;
9353
9354 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9355 epause->tx_pause = 1;
9356 else
9357 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009358}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009359
Linus Torvalds1da177e2005-04-16 15:20:36 -07009360static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9361{
9362 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009363 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009364
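	/* When phylib manages the PHY (TG3_FLG3_USE_PHYLIB), apply pause
	 * settings through the PHY device's advertisement; otherwise the
	 * legacy path below updates link_config under the full lock and
	 * restarts the hardware if the interface is running.
	 */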
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009365 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9366 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9367 return -EAGAIN;
9368
9369 if (epause->autoneg) {
9370 u32 newadv;
9371 struct phy_device *phydev;
9372
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009373 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009374
9375 if (epause->rx_pause) {
9376 if (epause->tx_pause)
9377 newadv = ADVERTISED_Pause;
9378 else
9379 newadv = ADVERTISED_Pause |
9380 ADVERTISED_Asym_Pause;
9381 } else if (epause->tx_pause) {
9382 newadv = ADVERTISED_Asym_Pause;
9383 } else
9384 newadv = 0;
9385
9386 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9387 u32 oldadv = phydev->advertising &
9388 (ADVERTISED_Pause |
9389 ADVERTISED_Asym_Pause);
9390 if (oldadv != newadv) {
9391 phydev->advertising &=
9392 ~(ADVERTISED_Pause |
9393 ADVERTISED_Asym_Pause);
9394 phydev->advertising |= newadv;
9395 err = phy_start_aneg(phydev);
9396 }
9397 } else {
9398 tp->link_config.advertising &=
9399 ~(ADVERTISED_Pause |
9400 ADVERTISED_Asym_Pause);
9401 tp->link_config.advertising |= newadv;
9402 }
9403 } else {
9404 if (epause->rx_pause)
9405 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9406 else
9407 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9408
9409 if (epause->tx_pause)
9410 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9411 else
9412 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9413
9414 if (netif_running(dev))
9415 tg3_setup_flow_control(tp, 0, 0);
9416 }
9417 } else {
9418 int irq_sync = 0;
9419
9420 if (netif_running(dev)) {
9421 tg3_netif_stop(tp);
9422 irq_sync = 1;
9423 }
9424
9425 tg3_full_lock(tp, irq_sync);
9426
9427 if (epause->autoneg)
9428 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9429 else
9430 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9431 if (epause->rx_pause)
9432 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9433 else
9434 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9435 if (epause->tx_pause)
9436 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9437 else
9438 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9439
9440 if (netif_running(dev)) {
9441 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9442 err = tg3_restart_hw(tp, 1);
9443 if (!err)
9444 tg3_netif_start(tp);
9445 }
9446
9447 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009448 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009449
Michael Chanb9ec6c12006-07-25 16:37:27 -07009450 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009451}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009452
Linus Torvalds1da177e2005-04-16 15:20:36 -07009453static u32 tg3_get_rx_csum(struct net_device *dev)
9454{
9455 struct tg3 *tp = netdev_priv(dev);
9456 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9457}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009458
Linus Torvalds1da177e2005-04-16 15:20:36 -07009459static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9460{
9461 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009462
Linus Torvalds1da177e2005-04-16 15:20:36 -07009463 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9464 if (data != 0)
9465 return -EINVAL;
9466 return 0;
9467 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009468
David S. Millerf47c11e2005-06-24 20:18:35 -07009469 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009470 if (data)
9471 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9472 else
9473 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009474 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009475
Linus Torvalds1da177e2005-04-16 15:20:36 -07009476 return 0;
9477}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009478
Linus Torvalds1da177e2005-04-16 15:20:36 -07009479static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9480{
9481 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009482
Linus Torvalds1da177e2005-04-16 15:20:36 -07009483 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9484 if (data != 0)
9485 return -EINVAL;
9486 return 0;
9487 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009488
Michael Chanaf36e6b2006-03-23 01:28:06 -08009489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009494 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009495 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009496 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497
9498 return 0;
9499}
9500
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009501static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009502{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009503 switch (sset) {
9504 case ETH_SS_TEST:
9505 return TG3_NUM_TEST;
9506 case ETH_SS_STATS:
9507 return TG3_NUM_STATS;
9508 default:
9509 return -EOPNOTSUPP;
9510 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009511}
9512
Linus Torvalds1da177e2005-04-16 15:20:36 -07009513static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9514{
9515 switch (stringset) {
9516 case ETH_SS_STATS:
9517 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9518 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009519 case ETH_SS_TEST:
9520 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9521 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009522 default:
9523 WARN_ON(1); /* we need a WARN() */
9524 break;
9525 }
9526}
9527
Michael Chan4009a932005-09-05 17:52:54 -07009528static int tg3_phys_id(struct net_device *dev, u32 data)
9529{
9530 struct tg3 *tp = netdev_priv(dev);
9531 int i;
9532
9533 if (!netif_running(tp->dev))
9534 return -EAGAIN;
9535
9536 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009537 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009538
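	/* Toggle the LED override bits every 500 ms, so 'data' is roughly
	 * the number of seconds to blink (e.g. as requested by ethtool -p).
	 */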
9539 for (i = 0; i < (data * 2); i++) {
9540 if ((i % 2) == 0)
9541 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9542 LED_CTRL_1000MBPS_ON |
9543 LED_CTRL_100MBPS_ON |
9544 LED_CTRL_10MBPS_ON |
9545 LED_CTRL_TRAFFIC_OVERRIDE |
9546 LED_CTRL_TRAFFIC_BLINK |
9547 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009548
Michael Chan4009a932005-09-05 17:52:54 -07009549 else
9550 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9551 LED_CTRL_TRAFFIC_OVERRIDE);
9552
9553 if (msleep_interruptible(500))
9554 break;
9555 }
9556 tw32(MAC_LED_CTRL, tp->led_ctrl);
9557 return 0;
9558}
9559
Linus Torvalds1da177e2005-04-16 15:20:36 -07009560static void tg3_get_ethtool_stats (struct net_device *dev,
9561 struct ethtool_stats *estats, u64 *tmp_stats)
9562{
9563 struct tg3 *tp = netdev_priv(dev);
9564 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9565}
9566
Michael Chan566f86a2005-05-29 14:56:58 -07009567#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009568#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9569#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9570#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009571#define NVRAM_SELFBOOT_HW_SIZE 0x20
9572#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009573
9574static int tg3_test_nvram(struct tg3 *tp)
9575{
Al Virob9fc7dc2007-12-17 22:59:57 -08009576 u32 csum, magic;
9577 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009578 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009579
Michael Chan18201802006-03-20 22:29:15 -08009580 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009581 return -EIO;
9582
Michael Chan1b277772006-03-20 22:27:48 -08009583 if (magic == TG3_EEPROM_MAGIC)
9584 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009585 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009586 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9587 TG3_EEPROM_SB_FORMAT_1) {
9588 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9589 case TG3_EEPROM_SB_REVISION_0:
9590 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9591 break;
9592 case TG3_EEPROM_SB_REVISION_2:
9593 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9594 break;
9595 case TG3_EEPROM_SB_REVISION_3:
9596 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9597 break;
9598 default:
9599 return 0;
9600 }
9601 } else
Michael Chan1b277772006-03-20 22:27:48 -08009602 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009603 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9604 size = NVRAM_SELFBOOT_HW_SIZE;
9605 else
Michael Chan1b277772006-03-20 22:27:48 -08009606 return -EIO;
9607
9608 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009609 if (buf == NULL)
9610 return -ENOMEM;
9611
Michael Chan1b277772006-03-20 22:27:48 -08009612 err = -EIO;
9613 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009614 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009615 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009616 }
Michael Chan1b277772006-03-20 22:27:48 -08009617 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009618 goto out;
9619
Michael Chan1b277772006-03-20 22:27:48 -08009620 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009621 magic = swab32(le32_to_cpu(buf[0]));
9622 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009623 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009624 u8 *buf8 = (u8 *) buf, csum8 = 0;
9625
Al Virob9fc7dc2007-12-17 22:59:57 -08009626 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009627 TG3_EEPROM_SB_REVISION_2) {
9628 /* For rev 2, the csum doesn't include the MBA. */
9629 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9630 csum8 += buf8[i];
9631 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9632 csum8 += buf8[i];
9633 } else {
9634 for (i = 0; i < size; i++)
9635 csum8 += buf8[i];
9636 }
Michael Chan1b277772006-03-20 22:27:48 -08009637
Adrian Bunkad96b482006-04-05 22:21:04 -07009638 if (csum8 == 0) {
9639 err = 0;
9640 goto out;
9641 }
9642
9643 err = -EIO;
9644 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009645 }
Michael Chan566f86a2005-05-29 14:56:58 -07009646
Al Virob9fc7dc2007-12-17 22:59:57 -08009647 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009648 TG3_EEPROM_MAGIC_HW) {
9649 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9650 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9651 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009652
9653 /* Separate the parity bits and the data bytes. */
9654 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9655 if ((i == 0) || (i == 8)) {
9656 int l;
9657 u8 msk;
9658
9659 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9660 parity[k++] = buf8[i] & msk;
9661 i++;
9662 }
9663 else if (i == 16) {
9664 int l;
9665 u8 msk;
9666
9667 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9668 parity[k++] = buf8[i] & msk;
9669 i++;
9670
9671 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9672 parity[k++] = buf8[i] & msk;
9673 i++;
9674 }
9675 data[j++] = buf8[i];
9676 }
9677
9678 err = -EIO;
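		/* Each data byte together with its stored parity bit must
		 * have odd parity; any byte that does not fails the test.
		 */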
9679 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9680 u8 hw8 = hweight8(data[i]);
9681
9682 if ((hw8 & 0x1) && parity[i])
9683 goto out;
9684 else if (!(hw8 & 0x1) && !parity[i])
9685 goto out;
9686 }
9687 err = 0;
9688 goto out;
9689 }
9690
Michael Chan566f86a2005-05-29 14:56:58 -07009691 /* Bootstrap checksum at offset 0x10 */
9692 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009693 if(csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009694 goto out;
9695
9696 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9697 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009698 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009699 goto out;
9700
9701 err = 0;
9702
9703out:
9704 kfree(buf);
9705 return err;
9706}
9707
Michael Chanca430072005-05-29 14:57:23 -07009708#define TG3_SERDES_TIMEOUT_SEC 2
9709#define TG3_COPPER_TIMEOUT_SEC 6
9710
9711static int tg3_test_link(struct tg3 *tp)
9712{
9713 int i, max;
9714
9715 if (!netif_running(tp->dev))
9716 return -ENODEV;
9717
Michael Chan4c987482005-09-05 17:52:38 -07009718 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009719 max = TG3_SERDES_TIMEOUT_SEC;
9720 else
9721 max = TG3_COPPER_TIMEOUT_SEC;
9722
9723 for (i = 0; i < max; i++) {
9724 if (netif_carrier_ok(tp->dev))
9725 return 0;
9726
9727 if (msleep_interruptible(1000))
9728 break;
9729 }
9730
9731 return -EIO;
9732}
9733
Michael Chana71116d2005-05-29 14:58:11 -07009734/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009735static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009736{
Michael Chanb16250e2006-09-27 16:10:14 -07009737 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009738 u32 offset, read_mask, write_mask, val, save_val, read_val;
9739 static struct {
9740 u16 offset;
9741 u16 flags;
9742#define TG3_FL_5705 0x1
9743#define TG3_FL_NOT_5705 0x2
9744#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009745#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009746 u32 read_mask;
9747 u32 write_mask;
9748 } reg_tbl[] = {
9749 /* MAC Control Registers */
9750 { MAC_MODE, TG3_FL_NOT_5705,
9751 0x00000000, 0x00ef6f8c },
9752 { MAC_MODE, TG3_FL_5705,
9753 0x00000000, 0x01ef6b8c },
9754 { MAC_STATUS, TG3_FL_NOT_5705,
9755 0x03800107, 0x00000000 },
9756 { MAC_STATUS, TG3_FL_5705,
9757 0x03800100, 0x00000000 },
9758 { MAC_ADDR_0_HIGH, 0x0000,
9759 0x00000000, 0x0000ffff },
9760 { MAC_ADDR_0_LOW, 0x0000,
9761 0x00000000, 0xffffffff },
9762 { MAC_RX_MTU_SIZE, 0x0000,
9763 0x00000000, 0x0000ffff },
9764 { MAC_TX_MODE, 0x0000,
9765 0x00000000, 0x00000070 },
9766 { MAC_TX_LENGTHS, 0x0000,
9767 0x00000000, 0x00003fff },
9768 { MAC_RX_MODE, TG3_FL_NOT_5705,
9769 0x00000000, 0x000007fc },
9770 { MAC_RX_MODE, TG3_FL_5705,
9771 0x00000000, 0x000007dc },
9772 { MAC_HASH_REG_0, 0x0000,
9773 0x00000000, 0xffffffff },
9774 { MAC_HASH_REG_1, 0x0000,
9775 0x00000000, 0xffffffff },
9776 { MAC_HASH_REG_2, 0x0000,
9777 0x00000000, 0xffffffff },
9778 { MAC_HASH_REG_3, 0x0000,
9779 0x00000000, 0xffffffff },
9780
9781 /* Receive Data and Receive BD Initiator Control Registers. */
9782 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9783 0x00000000, 0xffffffff },
9784 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9785 0x00000000, 0xffffffff },
9786 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9787 0x00000000, 0x00000003 },
9788 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9789 0x00000000, 0xffffffff },
9790 { RCVDBDI_STD_BD+0, 0x0000,
9791 0x00000000, 0xffffffff },
9792 { RCVDBDI_STD_BD+4, 0x0000,
9793 0x00000000, 0xffffffff },
9794 { RCVDBDI_STD_BD+8, 0x0000,
9795 0x00000000, 0xffff0002 },
9796 { RCVDBDI_STD_BD+0xc, 0x0000,
9797 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009798
Michael Chana71116d2005-05-29 14:58:11 -07009799 /* Receive BD Initiator Control Registers. */
9800 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9801 0x00000000, 0xffffffff },
9802 { RCVBDI_STD_THRESH, TG3_FL_5705,
9803 0x00000000, 0x000003ff },
9804 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9805 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009806
Michael Chana71116d2005-05-29 14:58:11 -07009807 /* Host Coalescing Control Registers. */
9808 { HOSTCC_MODE, TG3_FL_NOT_5705,
9809 0x00000000, 0x00000004 },
9810 { HOSTCC_MODE, TG3_FL_5705,
9811 0x00000000, 0x000000f6 },
9812 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9813 0x00000000, 0xffffffff },
9814 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9815 0x00000000, 0x000003ff },
9816 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9817 0x00000000, 0xffffffff },
9818 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9819 0x00000000, 0x000003ff },
9820 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9821 0x00000000, 0xffffffff },
9822 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9823 0x00000000, 0x000000ff },
9824 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9825 0x00000000, 0xffffffff },
9826 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9827 0x00000000, 0x000000ff },
9828 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9829 0x00000000, 0xffffffff },
9830 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9831 0x00000000, 0xffffffff },
9832 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9833 0x00000000, 0xffffffff },
9834 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9835 0x00000000, 0x000000ff },
9836 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9837 0x00000000, 0xffffffff },
9838 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9839 0x00000000, 0x000000ff },
9840 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9841 0x00000000, 0xffffffff },
9842 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9843 0x00000000, 0xffffffff },
9844 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9845 0x00000000, 0xffffffff },
9846 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9847 0x00000000, 0xffffffff },
9848 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9849 0x00000000, 0xffffffff },
9850 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9851 0xffffffff, 0x00000000 },
9852 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9853 0xffffffff, 0x00000000 },
9854
9855 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009856 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009857 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009858 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009859 0x00000000, 0x007fffff },
9860 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9861 0x00000000, 0x0000003f },
9862 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9863 0x00000000, 0x000001ff },
9864 { BUFMGR_MB_HIGH_WATER, 0x0000,
9865 0x00000000, 0x000001ff },
9866 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9867 0xffffffff, 0x00000000 },
9868 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9869 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009870
Michael Chana71116d2005-05-29 14:58:11 -07009871 /* Mailbox Registers */
9872 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9873 0x00000000, 0x000001ff },
9874 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9875 0x00000000, 0x000001ff },
9876 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9877 0x00000000, 0x000007ff },
9878 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9879 0x00000000, 0x000001ff },
9880
9881 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9882 };
9883
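	/* Classify the chip once up front so the TG3_FL_* flags in
	 * reg_tbl above can select which entries apply to this device.
	 */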
Michael Chanb16250e2006-09-27 16:10:14 -07009884 is_5705 = is_5750 = 0;
9885 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009886 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009887 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9888 is_5750 = 1;
9889 }
Michael Chana71116d2005-05-29 14:58:11 -07009890
9891 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9892 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9893 continue;
9894
9895 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9896 continue;
9897
9898 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9899 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9900 continue;
9901
Michael Chanb16250e2006-09-27 16:10:14 -07009902 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9903 continue;
9904
Michael Chana71116d2005-05-29 14:58:11 -07009905 offset = (u32) reg_tbl[i].offset;
9906 read_mask = reg_tbl[i].read_mask;
9907 write_mask = reg_tbl[i].write_mask;
9908
9909 /* Save the original register content */
9910 save_val = tr32(offset);
9911
9912 /* Determine the read-only value. */
9913 read_val = save_val & read_mask;
9914
9915 /* Write zero to the register, then make sure the read-only bits
9916 * are not changed and the read/write bits are all zeros.
9917 */
9918 tw32(offset, 0);
9919
9920 val = tr32(offset);
9921
9922 /* Test the read-only and read/write bits. */
9923 if (((val & read_mask) != read_val) || (val & write_mask))
9924 goto out;
9925
9926 /* Write ones to all the bits defined by RdMask and WrMask, then
9927 * make sure the read-only bits are not changed and the
9928 * read/write bits are all ones.
9929 */
9930 tw32(offset, read_mask | write_mask);
9931
9932 val = tr32(offset);
9933
9934 /* Test the read-only bits. */
9935 if ((val & read_mask) != read_val)
9936 goto out;
9937
9938 /* Test the read/write bits. */
9939 if ((val & write_mask) != write_mask)
9940 goto out;
9941
9942 tw32(offset, save_val);
9943 }
9944
9945 return 0;
9946
9947out:
Michael Chan9f88f292006-12-07 00:22:54 -08009948 if (netif_msg_hw(tp))
9949 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9950 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009951 tw32(offset, save_val);
9952 return -EIO;
9953}
9954
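/* Walk a window of on-chip memory: for each test pattern, write it to
 * every word in [offset, offset + len) with tg3_write_mem(), read it back
 * with tg3_read_mem(), and fail with -EIO on the first mismatch.
 */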
Michael Chan7942e1d2005-05-29 14:58:36 -07009955static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9956{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009957 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009958 int i;
9959 u32 j;
9960
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009961 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009962 for (j = 0; j < len; j += 4) {
9963 u32 val;
9964
9965 tg3_write_mem(tp, offset + j, test_pattern[i]);
9966 tg3_read_mem(tp, offset + j, &val);
9967 if (val != test_pattern[i])
9968 return -EIO;
9969 }
9970 }
9971 return 0;
9972}
9973
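/* Exercise the internal SRAM regions.  The offset/length table is chosen
 * by chip family (570x, 5705, 5755-class, 5906) and each region is run
 * through tg3_do_mem_test() until the 0xffffffff sentinel entry is reached.
 */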
9974static int tg3_test_memory(struct tg3 *tp)
9975{
9976 static struct mem_entry {
9977 u32 offset;
9978 u32 len;
9979 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009980 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009981 { 0x00002000, 0x1c000},
9982 { 0xffffffff, 0x00000}
9983 }, mem_tbl_5705[] = {
9984 { 0x00000100, 0x0000c},
9985 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009986 { 0x00004000, 0x00800},
9987 { 0x00006000, 0x01000},
9988 { 0x00008000, 0x02000},
9989 { 0x00010000, 0x0e000},
9990 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009991 }, mem_tbl_5755[] = {
9992 { 0x00000200, 0x00008},
9993 { 0x00004000, 0x00800},
9994 { 0x00006000, 0x00800},
9995 { 0x00008000, 0x02000},
9996 { 0x00010000, 0x0c000},
9997 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009998 }, mem_tbl_5906[] = {
9999 { 0x00000200, 0x00008},
10000 { 0x00004000, 0x00400},
10001 { 0x00006000, 0x00400},
10002 { 0x00008000, 0x01000},
10003 { 0x00010000, 0x01000},
10004 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -070010005 };
10006 struct mem_entry *mem_tbl;
10007 int err = 0;
10008 int i;
10009
Michael Chan79f4d132006-03-20 22:28:57 -080010010 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080010011 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070010012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070010013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -080010016 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -070010017 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10018 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -080010019 else
10020 mem_tbl = mem_tbl_5705;
10021 } else
Michael Chan7942e1d2005-05-29 14:58:36 -070010022 mem_tbl = mem_tbl_570x;
10023
10024 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10025 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10026 mem_tbl[i].len)) != 0)
10027 break;
10028 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010029
Michael Chan7942e1d2005-05-29 14:58:36 -070010030 return err;
10031}
10032
Michael Chan9f40dea2005-09-05 17:53:06 -070010033#define TG3_MAC_LOOPBACK 0
10034#define TG3_PHY_LOOPBACK 1
10035
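/* Run a single loopback pass.  MAC loopback turns the port back on itself
 * via MAC_MODE_PORT_INT_LPBACK; PHY loopback sets BMCR_LOOPBACK on the
 * transceiver.  A 1514-byte frame addressed to our own MAC with an
 * incrementing-byte payload is posted on the TX ring, the host-coalescing
 * block is kicked, and the status block is polled until the TX consumer
 * and RX producer indices advance, after which the received payload is
 * compared against what was sent.
 */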
10036static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -070010037{
Michael Chan9f40dea2005-09-05 17:53:06 -070010038 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -070010039 u32 desc_idx;
10040 struct sk_buff *skb, *rx_skb;
10041 u8 *tx_data;
10042 dma_addr_t map;
10043 int num_pkts, tx_len, rx_len, i, err;
10044 struct tg3_rx_buffer_desc *desc;
10045
Michael Chan9f40dea2005-09-05 17:53:06 -070010046 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -070010047 /* HW errata - mac loopback fails in some cases on 5780.
10048 * Normal traffic and PHY loopback are not affected by
10049 * errata.
10050 */
10051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10052 return 0;
10053
Michael Chan9f40dea2005-09-05 17:53:06 -070010054 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010055 MAC_MODE_PORT_INT_LPBACK;
10056 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10057 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -070010058 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10059 mac_mode |= MAC_MODE_PORT_MODE_MII;
10060 else
10061 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -070010062 tw32(MAC_MODE, mac_mode);
10063 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -070010064 u32 val;
10065
Michael Chanb16250e2006-09-27 16:10:14 -070010066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10067 u32 phytest;
10068
10069 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10070 u32 phy;
10071
10072 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10073 phytest | MII_TG3_EPHY_SHADOW_EN);
10074 if (!tg3_readphy(tp, 0x1b, &phy))
10075 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -070010076 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10077 }
Michael Chan5d64ad32006-12-07 00:19:40 -080010078 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10079 } else
10080 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -070010081
Matt Carlson9ef8ca92007-07-11 19:48:29 -070010082 tg3_phy_toggle_automdix(tp, 0);
10083
Michael Chan3f7045c2006-09-27 16:02:29 -070010084 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -070010085 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -080010086
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010087 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -080010088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010089 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010090 mac_mode |= MAC_MODE_PORT_MODE_MII;
10091 } else
10092 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010093
Michael Chanc94e3942005-09-27 12:12:42 -070010094 /* reset to prevent losing 1st rx packet intermittently */
10095 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10096 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10097 udelay(10);
10098 tw32_f(MAC_RX_MODE, tp->rx_mode);
10099 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10101 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10102 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10103 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10104 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010105 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10106 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10107 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010108 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010109 }
10110 else
10111 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010112
10113 err = -EIO;
10114
Michael Chanc76949a2005-05-29 14:58:59 -070010115 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010116 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010117 if (!skb)
10118 return -ENOMEM;
10119
Michael Chanc76949a2005-05-29 14:58:59 -070010120 tx_data = skb_put(skb, tx_len);
10121 memcpy(tx_data, tp->dev->dev_addr, 6);
10122 memset(tx_data + 6, 0x0, 8);
10123
10124 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10125
10126 for (i = 14; i < tx_len; i++)
10127 tx_data[i] = (u8) (i & 0xff);
10128
10129 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10130
10131 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10132 HOSTCC_MODE_NOW);
10133
10134 udelay(10);
10135
10136 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10137
Michael Chanc76949a2005-05-29 14:58:59 -070010138 num_pkts = 0;
10139
Michael Chan9f40dea2005-09-05 17:53:06 -070010140 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010141
Michael Chan9f40dea2005-09-05 17:53:06 -070010142 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010143 num_pkts++;
10144
Michael Chan9f40dea2005-09-05 17:53:06 -070010145 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10146 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010147 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010148
10149 udelay(10);
10150
Michael Chan3f7045c2006-09-27 16:02:29 -070010151 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10152 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010153 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10154 HOSTCC_MODE_NOW);
10155
10156 udelay(10);
10157
10158 tx_idx = tp->hw_status->idx[0].tx_consumer;
10159 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010160 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010161 (rx_idx == (rx_start_idx + num_pkts)))
10162 break;
10163 }
10164
10165 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10166 dev_kfree_skb(skb);
10167
Michael Chan9f40dea2005-09-05 17:53:06 -070010168 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010169 goto out;
10170
10171 if (rx_idx != rx_start_idx + num_pkts)
10172 goto out;
10173
10174 desc = &tp->rx_rcb[rx_start_idx];
10175 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10176 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10177 if (opaque_key != RXD_OPAQUE_RING_STD)
10178 goto out;
10179
10180 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10181 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10182 goto out;
10183
10184 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10185 if (rx_len != tx_len)
10186 goto out;
10187
10188 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10189
10190 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10191 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10192
10193 for (i = 14; i < tx_len; i++) {
10194 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10195 goto out;
10196 }
10197 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010198
Michael Chanc76949a2005-05-29 14:58:59 -070010199 /* tg3_free_rings will unmap and free the rx_skb */
10200out:
10201 return err;
10202}
10203
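/* Aggregate loopback self-test.  On 5784/5761/5785 the CPMU mutex is
 * acquired and the link-speed / link-aware power modes are masked off so
 * power management cannot disturb the test.  MAC loopback always runs;
 * PHY loopback is skipped for SERDES parts and when phylib owns the PHY.
 * The return value is a bitmask of the *_LOOPBACK_FAILED flags below.
 */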
Michael Chan9f40dea2005-09-05 17:53:06 -070010204#define TG3_MAC_LOOPBACK_FAILED 1
10205#define TG3_PHY_LOOPBACK_FAILED 2
10206#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10207 TG3_PHY_LOOPBACK_FAILED)
10208
10209static int tg3_test_loopback(struct tg3 *tp)
10210{
10211 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010212 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010213
10214 if (!netif_running(tp->dev))
10215 return TG3_LOOPBACK_FAILED;
10216
Michael Chanb9ec6c12006-07-25 16:37:27 -070010217 err = tg3_reset_hw(tp, 1);
10218 if (err)
10219 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010220
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010224 int i;
10225 u32 status;
10226
10227 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10228
10229 /* Wait for up to 40 microseconds to acquire lock. */
10230 for (i = 0; i < 4; i++) {
10231 status = tr32(TG3_CPMU_MUTEX_GNT);
10232 if (status == CPMU_MUTEX_GNT_DRIVER)
10233 break;
10234 udelay(10);
10235 }
10236
10237 if (status != CPMU_MUTEX_GNT_DRIVER)
10238 return TG3_LOOPBACK_FAILED;
10239
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010240 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010241 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010242 tw32(TG3_CPMU_CTRL,
10243 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10244 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010245 }
10246
Michael Chan9f40dea2005-09-05 17:53:06 -070010247 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10248 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010249
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010253 tw32(TG3_CPMU_CTRL, cpmuctrl);
10254
10255 /* Release the mutex */
10256 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10257 }
10258
Matt Carlsondd477002008-05-25 23:45:58 -070010259 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10260 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010261 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10262 err |= TG3_PHY_LOOPBACK_FAILED;
10263 }
10264
10265 return err;
10266}
10267
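/* ethtool self-test entry point.  Result slots: data[0] NVRAM, data[1]
 * link, data[2] registers, data[3] memory, data[4] loopback, data[5]
 * interrupt.  The register, memory, loopback and interrupt tests run only
 * for offline requests; the chip is halted around them and the hardware
 * is restarted afterwards if the interface was up.
 */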
Michael Chan4cafd3f2005-05-29 14:56:34 -070010268static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10269 u64 *data)
10270{
Michael Chan566f86a2005-05-29 14:56:58 -070010271 struct tg3 *tp = netdev_priv(dev);
10272
Michael Chanbc1c7562006-03-20 17:48:03 -080010273 if (tp->link_config.phy_is_low_power)
10274 tg3_set_power_state(tp, PCI_D0);
10275
Michael Chan566f86a2005-05-29 14:56:58 -070010276 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10277
10278 if (tg3_test_nvram(tp) != 0) {
10279 etest->flags |= ETH_TEST_FL_FAILED;
10280 data[0] = 1;
10281 }
Michael Chanca430072005-05-29 14:57:23 -070010282 if (tg3_test_link(tp) != 0) {
10283 etest->flags |= ETH_TEST_FL_FAILED;
10284 data[1] = 1;
10285 }
Michael Chana71116d2005-05-29 14:58:11 -070010286 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010287 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010288
Michael Chanbbe832c2005-06-24 20:20:04 -070010289 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010290 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010291 tg3_netif_stop(tp);
10292 irq_sync = 1;
10293 }
10294
10295 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010296
10297 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010298 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010299 tg3_halt_cpu(tp, RX_CPU_BASE);
10300 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10301 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010302 if (!err)
10303 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010304
Michael Chand9ab5ad2006-03-20 22:27:35 -080010305 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10306 tg3_phy_reset(tp);
10307
Michael Chana71116d2005-05-29 14:58:11 -070010308 if (tg3_test_registers(tp) != 0) {
10309 etest->flags |= ETH_TEST_FL_FAILED;
10310 data[2] = 1;
10311 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010312 if (tg3_test_memory(tp) != 0) {
10313 etest->flags |= ETH_TEST_FL_FAILED;
10314 data[3] = 1;
10315 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010316 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010317 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010318
David S. Millerf47c11e2005-06-24 20:18:35 -070010319 tg3_full_unlock(tp);
10320
Michael Chand4bc3922005-05-29 14:59:20 -070010321 if (tg3_test_interrupt(tp) != 0) {
10322 etest->flags |= ETH_TEST_FL_FAILED;
10323 data[5] = 1;
10324 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010325
10326 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010327
Michael Chana71116d2005-05-29 14:58:11 -070010328 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10329 if (netif_running(dev)) {
10330 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010331 err2 = tg3_restart_hw(tp, 1);
10332 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010333 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010334 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010335
10336 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010337
10338 if (irq_sync && !err2)
10339 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010340 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010341 if (tp->link_config.phy_is_low_power)
10342 tg3_set_power_state(tp, PCI_D3hot);
10343
Michael Chan4cafd3f2005-05-29 14:56:34 -070010344}
10345
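/* MII ioctl handler.  When phylib manages the PHY the request is handed
 * straight to phy_mii_ioctl(); otherwise SIOCGMIIPHY reports PHY_ADDR and
 * SIOCGMIIREG/SIOCSMIIREG go through tg3_readphy()/tg3_writephy() under
 * tp->lock.  Register access returns -EAGAIN while the PHY is powered
 * down, and SERDES parts fall through to -EOPNOTSUPP since there is no
 * MII PHY to talk to.
 */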
Linus Torvalds1da177e2005-04-16 15:20:36 -070010346static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10347{
10348 struct mii_ioctl_data *data = if_mii(ifr);
10349 struct tg3 *tp = netdev_priv(dev);
10350 int err;
10351
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010352 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10353 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10354 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010355 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010356 }
10357
Linus Torvalds1da177e2005-04-16 15:20:36 -070010358 switch(cmd) {
10359 case SIOCGMIIPHY:
10360 data->phy_id = PHY_ADDR;
10361
10362 /* fallthru */
10363 case SIOCGMIIREG: {
10364 u32 mii_regval;
10365
10366 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10367 break; /* We have no PHY */
10368
Michael Chanbc1c7562006-03-20 17:48:03 -080010369 if (tp->link_config.phy_is_low_power)
10370 return -EAGAIN;
10371
David S. Millerf47c11e2005-06-24 20:18:35 -070010372 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010373 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010374 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010375
10376 data->val_out = mii_regval;
10377
10378 return err;
10379 }
10380
10381 case SIOCSMIIREG:
10382 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10383 break; /* We have no PHY */
10384
10385 if (!capable(CAP_NET_ADMIN))
10386 return -EPERM;
10387
Michael Chanbc1c7562006-03-20 17:48:03 -080010388 if (tp->link_config.phy_is_low_power)
10389 return -EAGAIN;
10390
David S. Millerf47c11e2005-06-24 20:18:35 -070010391 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010392 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010393 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010394
10395 return err;
10396
10397 default:
10398 /* do nothing */
10399 break;
10400 }
10401 return -EOPNOTSUPP;
10402}
10403
10404#if TG3_VLAN_TAG_USED
10405static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10406{
10407 struct tg3 *tp = netdev_priv(dev);
10408
Michael Chan29315e82006-06-29 20:12:30 -070010409 if (netif_running(dev))
10410 tg3_netif_stop(tp);
10411
David S. Millerf47c11e2005-06-24 20:18:35 -070010412 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010413
10414 tp->vlgrp = grp;
10415
10416 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10417 __tg3_set_rx_mode(dev);
10418
Michael Chan29315e82006-06-29 20:12:30 -070010419 if (netif_running(dev))
10420 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010421
10422 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010423}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010424#endif
10425
David S. Miller15f98502005-05-18 22:49:26 -070010426static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10427{
10428 struct tg3 *tp = netdev_priv(dev);
10429
10430 memcpy(ec, &tp->coal, sizeof(*ec));
10431 return 0;
10432}
10433
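/* Validate and apply ethtool interrupt-coalescing parameters.  The
 * IRQ-deferral and stats-block limits are left at zero on 5705-class and
 * newer chips, so those fields cannot be set there; at least one of the
 * usec/frame triggers must be non-zero for both RX and TX.  Only the
 * supported fields are copied, and the new values take effect right away
 * via __tg3_set_coalesce() when the interface is running.
 */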
Michael Chand244c892005-07-05 14:42:33 -070010434static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10435{
10436 struct tg3 *tp = netdev_priv(dev);
10437 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10438 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10439
10440 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10441 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10442 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10443 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10444 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10445 }
10446
10447 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10448 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10449 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10450 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10451 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10452 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10453 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10454 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10455 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10456 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10457 return -EINVAL;
10458
10459 /* No rx interrupts will be generated if both are zero */
10460 if ((ec->rx_coalesce_usecs == 0) &&
10461 (ec->rx_max_coalesced_frames == 0))
10462 return -EINVAL;
10463
10464 /* No tx interrupts will be generated if both are zero */
10465 if ((ec->tx_coalesce_usecs == 0) &&
10466 (ec->tx_max_coalesced_frames == 0))
10467 return -EINVAL;
10468
10469 /* Only copy relevant parameters, ignore all others. */
10470 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10471 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10472 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10473 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10474 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10475 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10476 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10477 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10478 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10479
10480 if (netif_running(dev)) {
10481 tg3_full_lock(tp, 0);
10482 __tg3_set_coalesce(tp, &tp->coal);
10483 tg3_full_unlock(tp);
10484 }
10485 return 0;
10486}
10487
Jeff Garzik7282d492006-09-13 14:30:00 -040010488static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010489 .get_settings = tg3_get_settings,
10490 .set_settings = tg3_set_settings,
10491 .get_drvinfo = tg3_get_drvinfo,
10492 .get_regs_len = tg3_get_regs_len,
10493 .get_regs = tg3_get_regs,
10494 .get_wol = tg3_get_wol,
10495 .set_wol = tg3_set_wol,
10496 .get_msglevel = tg3_get_msglevel,
10497 .set_msglevel = tg3_set_msglevel,
10498 .nway_reset = tg3_nway_reset,
10499 .get_link = ethtool_op_get_link,
10500 .get_eeprom_len = tg3_get_eeprom_len,
10501 .get_eeprom = tg3_get_eeprom,
10502 .set_eeprom = tg3_set_eeprom,
10503 .get_ringparam = tg3_get_ringparam,
10504 .set_ringparam = tg3_set_ringparam,
10505 .get_pauseparam = tg3_get_pauseparam,
10506 .set_pauseparam = tg3_set_pauseparam,
10507 .get_rx_csum = tg3_get_rx_csum,
10508 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010509 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010510 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010511 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010512 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010513 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010514 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010515 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010516 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010517 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010518 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010519};
10520
10521static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10522{
Michael Chan1b277772006-03-20 22:27:48 -080010523 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010524
10525 tp->nvram_size = EEPROM_CHIP_SIZE;
10526
Michael Chan18201802006-03-20 22:29:15 -080010527 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010528 return;
10529
Michael Chanb16250e2006-09-27 16:10:14 -070010530 if ((magic != TG3_EEPROM_MAGIC) &&
10531 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10532 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010533 return;
10534
10535 /*
10536 * Size the chip by reading offsets at increasing powers of two.
10537 * When we encounter our validation signature, we know the addressing
10538 * has wrapped around, and thus have our chip size.
10539 */
Michael Chan1b277772006-03-20 22:27:48 -080010540 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010541
10542 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010543 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010544 return;
10545
Michael Chan18201802006-03-20 22:29:15 -080010546 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010547 break;
10548
10549 cursize <<= 1;
10550 }
10551
10552 tp->nvram_size = cursize;
10553}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010554
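/* Determine NVRAM size.  Images without the standard magic are selfboot
 * format and fall back to the EEPROM sizing probe; otherwise the word at
 * offset 0xf0 carries the size in KB in its upper 16 bits, and a zero
 * value there means the 512KB default.
 */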
Linus Torvalds1da177e2005-04-16 15:20:36 -070010555static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10556{
10557 u32 val;
10558
Michael Chan18201802006-03-20 22:29:15 -080010559 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010560 return;
10561
10562 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010563 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010564 tg3_get_eeprom_size(tp);
10565 return;
10566 }
10567
Linus Torvalds1da177e2005-04-16 15:20:36 -070010568 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10569 if (val != 0) {
10570 tp->nvram_size = (val >> 16) * 1024;
10571 return;
10572 }
10573 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010574 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010575}
10576
10577static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10578{
10579 u32 nvcfg1;
10580
10581 nvcfg1 = tr32(NVRAM_CFG1);
10582 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10583 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10584 }
10585 else {
10586 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10587 tw32(NVRAM_CFG1, nvcfg1);
10588 }
10589
Michael Chan4c987482005-09-05 17:52:38 -070010590 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010591 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010592 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10593 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10594 tp->nvram_jedecnum = JEDEC_ATMEL;
10595 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10596 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10597 break;
10598 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10599 tp->nvram_jedecnum = JEDEC_ATMEL;
10600 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10601 break;
10602 case FLASH_VENDOR_ATMEL_EEPROM:
10603 tp->nvram_jedecnum = JEDEC_ATMEL;
10604 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10605 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10606 break;
10607 case FLASH_VENDOR_ST:
10608 tp->nvram_jedecnum = JEDEC_ST;
10609 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10610 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10611 break;
10612 case FLASH_VENDOR_SAIFUN:
10613 tp->nvram_jedecnum = JEDEC_SAIFUN;
10614 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10615 break;
10616 case FLASH_VENDOR_SST_SMALL:
10617 case FLASH_VENDOR_SST_LARGE:
10618 tp->nvram_jedecnum = JEDEC_SST;
10619 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10620 break;
10621 }
10622 }
10623 else {
10624 tp->nvram_jedecnum = JEDEC_ATMEL;
10625 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10626 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10627 }
10628}
10629
Michael Chan361b4ac2005-04-21 17:11:21 -070010630static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10631{
10632 u32 nvcfg1;
10633
10634 nvcfg1 = tr32(NVRAM_CFG1);
10635
Michael Chane6af3012005-04-21 17:12:05 -070010636 /* NVRAM protection for TPM */
10637 if (nvcfg1 & (1 << 27))
10638 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10639
Michael Chan361b4ac2005-04-21 17:11:21 -070010640 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10641 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10642 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10643 tp->nvram_jedecnum = JEDEC_ATMEL;
10644 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10645 break;
10646 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10647 tp->nvram_jedecnum = JEDEC_ATMEL;
10648 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10649 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10650 break;
10651 case FLASH_5752VENDOR_ST_M45PE10:
10652 case FLASH_5752VENDOR_ST_M45PE20:
10653 case FLASH_5752VENDOR_ST_M45PE40:
10654 tp->nvram_jedecnum = JEDEC_ST;
10655 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10656 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10657 break;
10658 }
10659
10660 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10661 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10662 case FLASH_5752PAGE_SIZE_256:
10663 tp->nvram_pagesize = 256;
10664 break;
10665 case FLASH_5752PAGE_SIZE_512:
10666 tp->nvram_pagesize = 512;
10667 break;
10668 case FLASH_5752PAGE_SIZE_1K:
10669 tp->nvram_pagesize = 1024;
10670 break;
10671 case FLASH_5752PAGE_SIZE_2K:
10672 tp->nvram_pagesize = 2048;
10673 break;
10674 case FLASH_5752PAGE_SIZE_4K:
10675 tp->nvram_pagesize = 4096;
10676 break;
10677 case FLASH_5752PAGE_SIZE_264:
10678 tp->nvram_pagesize = 264;
10679 break;
10680 }
10681 }
10682 else {
10683 /* For eeprom, set pagesize to maximum eeprom size */
10684 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10685
10686 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10687 tw32(NVRAM_CFG1, nvcfg1);
10688 }
10689}
10690
Michael Chand3c7b882006-03-23 01:28:25 -080010691static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10692{
Matt Carlson989a9d22007-05-05 11:51:05 -070010693 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010694
10695 nvcfg1 = tr32(NVRAM_CFG1);
10696
10697 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010698 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010699 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010700 protect = 1;
10701 }
Michael Chand3c7b882006-03-23 01:28:25 -080010702
Matt Carlson989a9d22007-05-05 11:51:05 -070010703 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10704 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010705 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10706 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10707 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010708 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010709 tp->nvram_jedecnum = JEDEC_ATMEL;
10710 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10711 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10712 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010713 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10714 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010715 tp->nvram_size = (protect ? 0x3e200 :
10716 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010717 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010718 tp->nvram_size = (protect ? 0x1f200 :
10719 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010720 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010721 tp->nvram_size = (protect ? 0x1f200 :
10722 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010723 break;
10724 case FLASH_5752VENDOR_ST_M45PE10:
10725 case FLASH_5752VENDOR_ST_M45PE20:
10726 case FLASH_5752VENDOR_ST_M45PE40:
10727 tp->nvram_jedecnum = JEDEC_ST;
10728 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10729 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10730 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010731 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010732 tp->nvram_size = (protect ?
10733 TG3_NVRAM_SIZE_64KB :
10734 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010735 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010736 tp->nvram_size = (protect ?
10737 TG3_NVRAM_SIZE_64KB :
10738 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010739 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010740 tp->nvram_size = (protect ?
10741 TG3_NVRAM_SIZE_128KB :
10742 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010743 break;
10744 }
10745}
10746
Michael Chan1b277772006-03-20 22:27:48 -080010747static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10748{
10749 u32 nvcfg1;
10750
10751 nvcfg1 = tr32(NVRAM_CFG1);
10752
10753 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10754 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10755 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10756 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10757 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10758 tp->nvram_jedecnum = JEDEC_ATMEL;
10759 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10760 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10761
10762 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10763 tw32(NVRAM_CFG1, nvcfg1);
10764 break;
10765 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10766 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10767 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10768 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10769 tp->nvram_jedecnum = JEDEC_ATMEL;
10770 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10771 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10772 tp->nvram_pagesize = 264;
10773 break;
10774 case FLASH_5752VENDOR_ST_M45PE10:
10775 case FLASH_5752VENDOR_ST_M45PE20:
10776 case FLASH_5752VENDOR_ST_M45PE40:
10777 tp->nvram_jedecnum = JEDEC_ST;
10778 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10779 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10780 tp->nvram_pagesize = 256;
10781 break;
10782 }
10783}
10784
Matt Carlson6b91fa02007-10-10 18:01:09 -070010785static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10786{
10787 u32 nvcfg1, protect = 0;
10788
10789 nvcfg1 = tr32(NVRAM_CFG1);
10790
10791 /* NVRAM protection for TPM */
10792 if (nvcfg1 & (1 << 27)) {
10793 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10794 protect = 1;
10795 }
10796
10797 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10798 switch (nvcfg1) {
10799 case FLASH_5761VENDOR_ATMEL_ADB021D:
10800 case FLASH_5761VENDOR_ATMEL_ADB041D:
10801 case FLASH_5761VENDOR_ATMEL_ADB081D:
10802 case FLASH_5761VENDOR_ATMEL_ADB161D:
10803 case FLASH_5761VENDOR_ATMEL_MDB021D:
10804 case FLASH_5761VENDOR_ATMEL_MDB041D:
10805 case FLASH_5761VENDOR_ATMEL_MDB081D:
10806 case FLASH_5761VENDOR_ATMEL_MDB161D:
10807 tp->nvram_jedecnum = JEDEC_ATMEL;
10808 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10809 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10810 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10811 tp->nvram_pagesize = 256;
10812 break;
10813 case FLASH_5761VENDOR_ST_A_M45PE20:
10814 case FLASH_5761VENDOR_ST_A_M45PE40:
10815 case FLASH_5761VENDOR_ST_A_M45PE80:
10816 case FLASH_5761VENDOR_ST_A_M45PE16:
10817 case FLASH_5761VENDOR_ST_M_M45PE20:
10818 case FLASH_5761VENDOR_ST_M_M45PE40:
10819 case FLASH_5761VENDOR_ST_M_M45PE80:
10820 case FLASH_5761VENDOR_ST_M_M45PE16:
10821 tp->nvram_jedecnum = JEDEC_ST;
10822 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10823 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10824 tp->nvram_pagesize = 256;
10825 break;
10826 }
10827
10828 if (protect) {
10829 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10830 } else {
10831 switch (nvcfg1) {
10832 case FLASH_5761VENDOR_ATMEL_ADB161D:
10833 case FLASH_5761VENDOR_ATMEL_MDB161D:
10834 case FLASH_5761VENDOR_ST_A_M45PE16:
10835 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010836 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010837 break;
10838 case FLASH_5761VENDOR_ATMEL_ADB081D:
10839 case FLASH_5761VENDOR_ATMEL_MDB081D:
10840 case FLASH_5761VENDOR_ST_A_M45PE80:
10841 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010842 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010843 break;
10844 case FLASH_5761VENDOR_ATMEL_ADB041D:
10845 case FLASH_5761VENDOR_ATMEL_MDB041D:
10846 case FLASH_5761VENDOR_ST_A_M45PE40:
10847 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010848 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010849 break;
10850 case FLASH_5761VENDOR_ATMEL_ADB021D:
10851 case FLASH_5761VENDOR_ATMEL_MDB021D:
10852 case FLASH_5761VENDOR_ST_A_M45PE20:
10853 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010854 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010855 break;
10856 }
10857 }
10858}
10859
Michael Chanb5d37722006-09-27 16:06:21 -070010860static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10861{
10862 tp->nvram_jedecnum = JEDEC_ATMEL;
10863 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10864 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10865}
10866
Linus Torvalds1da177e2005-04-16 15:20:36 -070010867/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10868static void __devinit tg3_nvram_init(struct tg3 *tp)
10869{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010870 tw32_f(GRC_EEPROM_ADDR,
10871 (EEPROM_ADDR_FSM_RESET |
10872 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10873 EEPROM_ADDR_CLKPERD_SHIFT)));
10874
Michael Chan9d57f012006-12-07 00:23:25 -080010875 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010876
10877 /* Enable seeprom accesses. */
10878 tw32_f(GRC_LOCAL_CTRL,
10879 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10880 udelay(100);
10881
10882 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10883 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10884 tp->tg3_flags |= TG3_FLAG_NVRAM;
10885
Michael Chanec41c7d2006-01-17 02:40:55 -080010886 if (tg3_nvram_lock(tp)) {
10887 			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10888 "tg3_nvram_init failed.\n", tp->dev->name);
10889 return;
10890 }
Michael Chane6af3012005-04-21 17:12:05 -070010891 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010892
Matt Carlson989a9d22007-05-05 11:51:05 -070010893 tp->nvram_size = 0;
10894
Michael Chan361b4ac2005-04-21 17:11:21 -070010895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10896 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010897 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10898 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010899 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10901 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010902 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010903 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10904 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010905 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10906 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010907 else
10908 tg3_get_nvram_info(tp);
10909
Matt Carlson989a9d22007-05-05 11:51:05 -070010910 if (tp->nvram_size == 0)
10911 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010912
Michael Chane6af3012005-04-21 17:12:05 -070010913 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010914 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010915
10916 } else {
10917 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10918
10919 tg3_get_eeprom_size(tp);
10920 }
10921}
10922
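/* Read one 32-bit word through the legacy EEPROM state machine: program
 * the byte offset into GRC_EEPROM_ADDR with the READ and START bits set,
 * poll for EEPROM_ADDR_COMPLETE (1000 sleeps of at least 1 ms), then
 * fetch the word from GRC_EEPROM_DATA.  The offset must be 32-bit aligned.
 */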
10923static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10924 u32 offset, u32 *val)
10925{
10926 u32 tmp;
10927 int i;
10928
10929 if (offset > EEPROM_ADDR_ADDR_MASK ||
10930 (offset % 4) != 0)
10931 return -EINVAL;
10932
10933 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10934 EEPROM_ADDR_DEVID_MASK |
10935 EEPROM_ADDR_READ);
10936 tw32(GRC_EEPROM_ADDR,
10937 tmp |
10938 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10939 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10940 EEPROM_ADDR_ADDR_MASK) |
10941 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10942
Michael Chan9d57f012006-12-07 00:23:25 -080010943 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010944 tmp = tr32(GRC_EEPROM_ADDR);
10945
10946 if (tmp & EEPROM_ADDR_COMPLETE)
10947 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010948 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010949 }
10950 if (!(tmp & EEPROM_ADDR_COMPLETE))
10951 return -EBUSY;
10952
10953 *val = tr32(GRC_EEPROM_DATA);
10954 return 0;
10955}
10956
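/* Issue an NVRAM command and poll NVRAM_CMD_DONE, giving the controller
 * up to NVRAM_CMD_TIMEOUT iterations of ~10 usec each (roughly 100 ms)
 * before reporting -EBUSY.
 */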
10957#define NVRAM_CMD_TIMEOUT 10000
10958
10959static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10960{
10961 int i;
10962
10963 tw32(NVRAM_CMD, nvram_cmd);
10964 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10965 udelay(10);
10966 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10967 udelay(10);
10968 break;
10969 }
10970 }
10971 if (i == NVRAM_CMD_TIMEOUT) {
10972 return -EBUSY;
10973 }
10974 return 0;
10975}
10976
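/* Convert a linear NVRAM offset to the physical address expected by
 * buffered Atmel (AT45DB0x1B-style) flash, which addresses 264-byte pages
 * with the page number shifted into the high bits.  As a worked example,
 * assuming ATMEL_AT45DB0X1B_PAGE_POS is 9 and nvram_pagesize is 264:
 * linear offset 0x2108 is page 32, byte 8, so the physical address becomes
 * (32 << 9) + 8 = 0x4008.  tg3_nvram_logical_addr() below is the inverse
 * mapping.
 */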
Michael Chan18201802006-03-20 22:29:15 -080010977static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10978{
10979 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10980 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10981 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010982 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010983 (tp->nvram_jedecnum == JEDEC_ATMEL))
10984
10985 addr = ((addr / tp->nvram_pagesize) <<
10986 ATMEL_AT45DB0X1B_PAGE_POS) +
10987 (addr % tp->nvram_pagesize);
10988
10989 return addr;
10990}
10991
Michael Chanc4e65752006-03-20 22:29:32 -080010992static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10993{
10994 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10995 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10996 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010997 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010998 (tp->nvram_jedecnum == JEDEC_ATMEL))
10999
11000 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
11001 tp->nvram_pagesize) +
11002 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
11003
11004 return addr;
11005}
11006
Linus Torvalds1da177e2005-04-16 15:20:36 -070011007static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11008{
11009 int ret;
11010
Linus Torvalds1da177e2005-04-16 15:20:36 -070011011 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11012 return tg3_nvram_read_using_eeprom(tp, offset, val);
11013
Michael Chan18201802006-03-20 22:29:15 -080011014 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011015
11016 if (offset > NVRAM_ADDR_MSK)
11017 return -EINVAL;
11018
Michael Chanec41c7d2006-01-17 02:40:55 -080011019 ret = tg3_nvram_lock(tp);
11020 if (ret)
11021 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011022
Michael Chane6af3012005-04-21 17:12:05 -070011023 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011024
11025 tw32(NVRAM_ADDR, offset);
11026 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11027 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11028
11029 if (ret == 0)
11030 *val = swab32(tr32(NVRAM_RDDATA));
11031
Michael Chane6af3012005-04-21 17:12:05 -070011032 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011033
Michael Chan381291b2005-12-13 21:08:21 -080011034 tg3_nvram_unlock(tp);
11035
Linus Torvalds1da177e2005-04-16 15:20:36 -070011036 return ret;
11037}
11038
Al Virob9fc7dc2007-12-17 22:59:57 -080011039static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11040{
11041 u32 v;
11042 int res = tg3_nvram_read(tp, offset, &v);
11043 if (!res)
11044 *val = cpu_to_le32(v);
11045 return res;
11046}
11047
Michael Chan18201802006-03-20 22:29:15 -080011048static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11049{
11050 int err;
11051 u32 tmp;
11052
11053 err = tg3_nvram_read(tp, offset, &tmp);
11054 *val = swab32(tmp);
11055 return err;
11056}
11057
Linus Torvalds1da177e2005-04-16 15:20:36 -070011058static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11059 u32 offset, u32 len, u8 *buf)
11060{
11061 int i, j, rc = 0;
11062 u32 val;
11063
11064 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011065 u32 addr;
11066 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011067
11068 addr = offset + i;
11069
11070 memcpy(&data, buf + i, 4);
11071
Al Virob9fc7dc2007-12-17 22:59:57 -080011072 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011073
11074 val = tr32(GRC_EEPROM_ADDR);
11075 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11076
11077 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11078 EEPROM_ADDR_READ);
11079 tw32(GRC_EEPROM_ADDR, val |
11080 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11081 (addr & EEPROM_ADDR_ADDR_MASK) |
11082 EEPROM_ADDR_START |
11083 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011084
Michael Chan9d57f012006-12-07 00:23:25 -080011085 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011086 val = tr32(GRC_EEPROM_ADDR);
11087
11088 if (val & EEPROM_ADDR_COMPLETE)
11089 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011090 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011091 }
11092 if (!(val & EEPROM_ADDR_COMPLETE)) {
11093 rc = -EBUSY;
11094 break;
11095 }
11096 }
11097
11098 return rc;
11099}
11100
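/* Unbuffered (page-erase) flash write path.  For each flash page touched:
 * read the whole page into a bounce buffer, merge in the caller's bytes,
 * issue a write-enable, erase the page, write-enable again, then stream
 * the page back one word at a time with NVRAM_CMD_FIRST/LAST framing.
 * A write-disable command is issued once the loop finishes.
 */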
11101/* offset and length are dword aligned */
11102static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11103 u8 *buf)
11104{
11105 int ret = 0;
11106 u32 pagesize = tp->nvram_pagesize;
11107 u32 pagemask = pagesize - 1;
11108 u32 nvram_cmd;
11109 u8 *tmp;
11110
11111 tmp = kmalloc(pagesize, GFP_KERNEL);
11112 if (tmp == NULL)
11113 return -ENOMEM;
11114
11115 while (len) {
11116 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011117 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011118
11119 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011120
Linus Torvalds1da177e2005-04-16 15:20:36 -070011121 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011122 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011123 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011124 break;
11125 }
11126 if (ret)
11127 break;
11128
11129 page_off = offset & pagemask;
11130 size = pagesize;
11131 if (len < size)
11132 size = len;
11133
11134 len -= size;
11135
11136 memcpy(tmp + page_off, buf, size);
11137
11138 offset = offset + (pagesize - page_off);
11139
Michael Chane6af3012005-04-21 17:12:05 -070011140 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011141
11142 /*
11143 * Before we can erase the flash page, we need
11144 * to issue a special "write enable" command.
11145 */
11146 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11147
11148 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11149 break;
11150
11151 /* Erase the target page */
11152 tw32(NVRAM_ADDR, phy_addr);
11153
11154 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11155 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11156
11157 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11158 break;
11159
11160 /* Issue another write enable to start the write. */
11161 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11162
11163 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11164 break;
11165
11166 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011167 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011168
Al Virob9fc7dc2007-12-17 22:59:57 -080011169 data = *((__be32 *) (tmp + j));
11170 /* swab32(le32_to_cpu(data)), actually */
11171 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011172
11173 tw32(NVRAM_ADDR, phy_addr + j);
11174
11175 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11176 NVRAM_CMD_WR;
11177
11178 if (j == 0)
11179 nvram_cmd |= NVRAM_CMD_FIRST;
11180 else if (j == (pagesize - 4))
11181 nvram_cmd |= NVRAM_CMD_LAST;
11182
11183 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11184 break;
11185 }
11186 if (ret)
11187 break;
11188 }
11189
11190 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11191 tg3_nvram_exec_cmd(tp, nvram_cmd);
11192
11193 kfree(tmp);
11194
11195 return ret;
11196}
11197
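/* Buffered flash / EEPROM write path: data is streamed a word at a time,
 * tagging NVRAM_CMD_FIRST at each page boundary (and on the first word)
 * and NVRAM_CMD_LAST at the end of a page or of the buffer.  ST flash
 * additionally gets an explicit write-enable before the first word on the
 * chip generations covered by the ASIC_REV checks below, and non-flash
 * (EEPROM-style) devices always use FIRST|LAST on every word.
 */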
11198/* offset and length are dword aligned */
11199static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11200 u8 *buf)
11201{
11202 int i, ret = 0;
11203
11204 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011205 u32 page_off, phy_addr, nvram_cmd;
11206 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011207
11208 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011209 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011210
11211 page_off = offset % tp->nvram_pagesize;
11212
Michael Chan18201802006-03-20 22:29:15 -080011213 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011214
11215 tw32(NVRAM_ADDR, phy_addr);
11216
11217 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11218
11219 if ((page_off == 0) || (i == 0))
11220 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011221 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011222 nvram_cmd |= NVRAM_CMD_LAST;
11223
11224 if (i == (len - 4))
11225 nvram_cmd |= NVRAM_CMD_LAST;
11226
Michael Chan4c987482005-09-05 17:52:38 -070011227 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011228 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011229 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011230 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011231 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011232 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011233 (tp->nvram_jedecnum == JEDEC_ST) &&
11234 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011235
11236 if ((ret = tg3_nvram_exec_cmd(tp,
11237 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11238 NVRAM_CMD_DONE)))
11239
11240 break;
11241 }
11242 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11243 /* We always do complete word writes to eeprom. */
11244 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11245 }
11246
11247 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11248 break;
11249 }
11250 return ret;
11251}
11252
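/* Top-level NVRAM write.  If the EEPROM write-protect GPIO is in use, the
 * output driving it is de-asserted for the duration of the write.  Legacy
 * EEPROM parts go through the GRC_EEPROM_ADDR state machine; real NVRAM
 * takes the arbitration lock, enables write mode (GRC_MODE_NVRAM_WR_ENABLE)
 * and dispatches to the buffered or unbuffered block writer.
 */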
11253/* offset and length are dword aligned */
11254static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11255{
11256 int ret;
11257
Linus Torvalds1da177e2005-04-16 15:20:36 -070011258 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011259 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11260 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011261 udelay(40);
11262 }
11263
11264 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11265 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11266 }
11267 else {
11268 u32 grc_mode;
11269
Michael Chanec41c7d2006-01-17 02:40:55 -080011270 ret = tg3_nvram_lock(tp);
11271 if (ret)
11272 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011273
Michael Chane6af3012005-04-21 17:12:05 -070011274 tg3_enable_nvram_access(tp);
11275 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11276 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011277 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011278
11279 grc_mode = tr32(GRC_MODE);
11280 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11281
11282 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11283 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11284
11285 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11286 buf);
11287		} else {
11289 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11290 buf);
11291 }
11292
11293 grc_mode = tr32(GRC_MODE);
11294 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11295
Michael Chane6af3012005-04-21 17:12:05 -070011296 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011297 tg3_nvram_unlock(tp);
11298 }
11299
11300 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011301 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011302 udelay(40);
11303 }
11304
11305 return ret;
11306}
11307
11308struct subsys_tbl_ent {
11309 u16 subsys_vendor, subsys_devid;
11310 u32 phy_id;
11311};
11312
11313static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11314 /* Broadcom boards. */
11315 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11316 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11317 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11318 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11319 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11320 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11321 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11322 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11323 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11324 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11325 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11326
11327 /* 3com boards. */
11328 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11329 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11330 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11331 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11332 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11333
11334 /* DELL boards. */
11335 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11336 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11337 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11338 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11339
11340 /* Compaq boards. */
11341 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11342 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11343 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11344 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11345 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11346
11347 /* IBM boards. */
11348 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11349};
11350
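/* Match the PCI subsystem vendor/device IDs against the table above
 * to recover the PHY type on boards whose NVRAM does not provide a
 * usable PHY ID.
 */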
11351static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11352{
11353 int i;
11354
11355 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11356 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11357 tp->pdev->subsystem_vendor) &&
11358 (subsys_id_to_phy_id[i].subsys_devid ==
11359 tp->pdev->subsystem_device))
11360 return &subsys_id_to_phy_id[i];
11361 }
11362 return NULL;
11363}
11364
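/* Pull the hardware configuration that the bootcode left in NIC SRAM
 * (the NIC_SRAM_DATA_* words): PHY ID and serdes type, LED mode, WOL
 * and ASF/APE enables, and the GPIO/eeprom write-protect hints.  The
 * chip is forced into D0 and the memory arbiter is enabled first so
 * that the SRAM reads are valid.
 */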
Michael Chan7d0c41e2005-04-21 17:06:20 -070011365static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011366{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011367 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011368 u16 pmcsr;
11369
11370 /* On some early chips the SRAM cannot be accessed in D3hot state,
11371	 * so we need to make sure we're in D0.
11372 */
11373 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11374 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11375 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11376 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011377
11378 /* Make sure register accesses (indirect or otherwise)
11379 * will function correctly.
11380 */
11381 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11382 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011383
David S. Millerf49639e2006-06-09 11:58:36 -070011384 /* The memory arbiter has to be enabled in order for SRAM accesses
11385 * to succeed. Normally on powerup the tg3 chip firmware will make
11386 * sure it is enabled, but other entities such as system netboot
11387 * code might disable it.
11388 */
11389 val = tr32(MEMARB_MODE);
11390 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11391
Linus Torvalds1da177e2005-04-16 15:20:36 -070011392 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011393 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11394
Gary Zambranoa85feb82007-05-05 11:52:19 -070011395 /* Assume an onboard device and WOL capable by default. */
11396 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011397
Michael Chanb5d37722006-09-27 16:06:21 -070011398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011399 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011400 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011401 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11402 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011403 val = tr32(VCPU_CFGSHDW);
11404 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011405 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011406 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011407 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11408 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011409 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011410 goto done;
Michael Chanb5d37722006-09-27 16:06:21 -070011411 }
11412
Linus Torvalds1da177e2005-04-16 15:20:36 -070011413 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11414 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11415 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011416 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011417 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011418
11419 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11420 tp->nic_sram_data_cfg = nic_cfg;
11421
11422 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11423 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11424 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11425 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11426 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11427 (ver > 0) && (ver < 0x100))
11428 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11429
Matt Carlsona9daf362008-05-25 23:49:44 -070011430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11431 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11432
Linus Torvalds1da177e2005-04-16 15:20:36 -070011433 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11434 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11435 eeprom_phy_serdes = 1;
11436
11437 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11438 if (nic_phy_id != 0) {
11439 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11440 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11441
11442 eeprom_phy_id = (id1 >> 16) << 10;
11443 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11444 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11445 } else
11446 eeprom_phy_id = 0;
11447
Michael Chan7d0c41e2005-04-21 17:06:20 -070011448 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011449 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011450 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011451 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11452 else
11453 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11454 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011455
John W. Linvillecbf46852005-04-21 17:01:29 -070011456 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011457 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11458 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011459 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011460 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11461
11462 switch (led_cfg) {
11463 default:
11464 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11465 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11466 break;
11467
11468 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11469 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11470 break;
11471
11472 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11473 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011474
11475 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11476			 * read from some older 5700/5701 bootcode.
11477 */
11478 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11479 ASIC_REV_5700 ||
11480 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11481 ASIC_REV_5701)
11482 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11483
Linus Torvalds1da177e2005-04-16 15:20:36 -070011484 break;
11485
11486 case SHASTA_EXT_LED_SHARED:
11487 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11488 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11489 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11490 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11491 LED_CTRL_MODE_PHY_2);
11492 break;
11493
11494 case SHASTA_EXT_LED_MAC:
11495 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11496 break;
11497
11498 case SHASTA_EXT_LED_COMBO:
11499 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11500 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11501 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11502 LED_CTRL_MODE_PHY_2);
11503 break;
11504
Stephen Hemminger855e1112008-04-16 16:37:28 -070011505 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011506
11507 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11509 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11510 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11511
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011512 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11513 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011514
Michael Chan9d26e212006-12-07 00:21:14 -080011515 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011516 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011517 if ((tp->pdev->subsystem_vendor ==
11518 PCI_VENDOR_ID_ARIMA) &&
11519 (tp->pdev->subsystem_device == 0x205a ||
11520 tp->pdev->subsystem_device == 0x2063))
11521 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11522 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011523 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011524 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11525 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011526
11527 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11528 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011529 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011530 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11531 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011532
11533 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11534 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011535 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011536
Gary Zambranoa85feb82007-05-05 11:52:19 -070011537 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11538 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11539 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011540
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011541 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011542 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
Matt Carlson0527ba32007-10-10 18:03:30 -070011543 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11544
Linus Torvalds1da177e2005-04-16 15:20:36 -070011545 if (cfg2 & (1 << 17))
11546 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11547
11548		/* SerDes signal pre-emphasis in register 0x590 is set by the
11549		 * bootcode if bit 18 is set. */
11550 if (cfg2 & (1 << 18))
11551 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011552
11553 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11554 u32 cfg3;
11555
11556 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11557 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11558 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11559 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011560
11561 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11562 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11563 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11564 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11565 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11566 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011567 }
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011568done:
11569 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11570 device_set_wakeup_enable(&tp->pdev->dev,
11571 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011572}
11573
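/* Issue a single OTP controller command and poll OTP_STATUS for up to
 * 1 ms (100 iterations of 10 usec) for completion.
 */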
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011574static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11575{
11576 int i;
11577 u32 val;
11578
11579 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11580 tw32(OTP_CTRL, cmd);
11581
11582 /* Wait for up to 1 ms for command to execute. */
11583 for (i = 0; i < 100; i++) {
11584 val = tr32(OTP_STATUS);
11585 if (val & OTP_STATUS_CMD_DONE)
11586 break;
11587 udelay(10);
11588 }
11589
11590 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11591}
11592
11593/* Read the gphy configuration from the OTP region of the chip. The gphy
11594 * configuration is a 32-bit value that straddles the alignment boundary.
11595 * We do two 32-bit reads and then shift and merge the results.
11596 */
11597static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11598{
11599 u32 bhalf_otp, thalf_otp;
11600
11601 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11602
11603 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11604 return 0;
11605
11606 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11607
11608 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11609 return 0;
11610
11611 thalf_otp = tr32(OTP_READ_DATA);
11612
11613 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11614
11615 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11616 return 0;
11617
11618 bhalf_otp = tr32(OTP_READ_DATA);
11619
11620 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11621}
11622
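/* Probe the PHY.  With phylib in use this is delegated to
 * tg3_phy_init().  Otherwise the ID is read from MII_PHYSID1/2
 * (unless ASF/APE firmware owns the PHY), falling back first to the
 * ID recovered from NVRAM and then to the subsystem-ID table.
 */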
Michael Chan7d0c41e2005-04-21 17:06:20 -070011623static int __devinit tg3_phy_probe(struct tg3 *tp)
11624{
11625 u32 hw_phy_id_1, hw_phy_id_2;
11626 u32 hw_phy_id, hw_phy_id_masked;
11627 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011628
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011629 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11630 return tg3_phy_init(tp);
11631
Linus Torvalds1da177e2005-04-16 15:20:36 -070011632 /* Reading the PHY ID register can conflict with ASF
11633 * firwmare access to the PHY hardware.
11634 */
11635 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011636 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11637 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011638 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11639 } else {
11640 /* Now read the physical PHY_ID from the chip and verify
11641 * that it is sane. If it doesn't look good, we fall back
11642 * to either the hard-coded table based PHY_ID and failing
11643		 * to the value found in the eeprom area and, failing that,
11644		 * to the hard-coded, subsystem-ID-based table.
11645 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11646 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11647
11648 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11649 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11650 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11651
11652 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11653 }
11654
11655 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11656 tp->phy_id = hw_phy_id;
11657 if (hw_phy_id_masked == PHY_ID_BCM8002)
11658 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011659 else
11660 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011661 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011662 if (tp->phy_id != PHY_ID_INVALID) {
11663 /* Do nothing, phy ID already set up in
11664 * tg3_get_eeprom_hw_cfg().
11665 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011666 } else {
11667 struct subsys_tbl_ent *p;
11668
11669 /* No eeprom signature? Try the hardcoded
11670 * subsys device table.
11671 */
11672 p = lookup_by_subsys(tp);
11673 if (!p)
11674 return -ENODEV;
11675
11676 tp->phy_id = p->phy_id;
11677 if (!tp->phy_id ||
11678 tp->phy_id == PHY_ID_BCM8002)
11679 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11680 }
11681 }
11682
Michael Chan747e8f82005-07-25 12:33:22 -070011683 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011684 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011685 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011686 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011687
11688 tg3_readphy(tp, MII_BMSR, &bmsr);
11689 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11690 (bmsr & BMSR_LSTATUS))
11691 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011692
Linus Torvalds1da177e2005-04-16 15:20:36 -070011693 err = tg3_phy_reset(tp);
11694 if (err)
11695 return err;
11696
11697 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11698 ADVERTISE_100HALF | ADVERTISE_100FULL |
11699 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11700 tg3_ctrl = 0;
11701 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11702 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11703 MII_TG3_CTRL_ADV_1000_FULL);
11704 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11705 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11706 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11707 MII_TG3_CTRL_ENABLE_AS_MASTER);
11708 }
11709
Michael Chan3600d912006-12-07 00:21:48 -080011710 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11711 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11712 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11713 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011714 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11715
11716 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11717 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11718
11719 tg3_writephy(tp, MII_BMCR,
11720 BMCR_ANENABLE | BMCR_ANRESTART);
11721 }
11722 tg3_phy_set_wirespeed(tp);
11723
11724 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11725 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11726 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11727 }
11728
11729skip_phy_reset:
11730 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11731 err = tg3_init_5401phy_dsp(tp);
11732 if (err)
11733 return err;
11734 }
11735
11736 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11737 err = tg3_init_5401phy_dsp(tp);
11738 }
11739
Michael Chan747e8f82005-07-25 12:33:22 -070011740 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011741 tp->link_config.advertising =
11742 (ADVERTISED_1000baseT_Half |
11743 ADVERTISED_1000baseT_Full |
11744 ADVERTISED_Autoneg |
11745 ADVERTISED_FIBRE);
11746 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11747 tp->link_config.advertising &=
11748 ~(ADVERTISED_1000baseT_Half |
11749 ADVERTISED_1000baseT_Full);
11750
11751 return err;
11752}
11753
11754static void __devinit tg3_read_partno(struct tg3 *tp)
11755{
11756 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011757 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011758 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011759
Michael Chan18201802006-03-20 22:29:15 -080011760 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011761 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011762
Michael Chan18201802006-03-20 22:29:15 -080011763 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011764 for (i = 0; i < 256; i += 4) {
11765 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011766
Michael Chan1b277772006-03-20 22:27:48 -080011767 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11768 goto out_not_found;
11769
11770 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11771 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11772 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11773 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11774 }
11775 } else {
11776 int vpd_cap;
11777
11778 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11779 for (i = 0; i < 256; i += 4) {
11780 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011781 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011782 u16 tmp16;
11783
11784 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11785 i);
11786 while (j++ < 100) {
11787 pci_read_config_word(tp->pdev, vpd_cap +
11788 PCI_VPD_ADDR, &tmp16);
11789 if (tmp16 & 0x8000)
11790 break;
11791 msleep(1);
11792 }
David S. Millerf49639e2006-06-09 11:58:36 -070011793 if (!(tmp16 & 0x8000))
11794 goto out_not_found;
11795
Michael Chan1b277772006-03-20 22:27:48 -080011796 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11797 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011798 v = cpu_to_le32(tmp);
11799 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011801 }
11802
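	/* vpd_data[] now holds standard PCI VPD resource records: tag 0x82
	 * is the identifier string, 0x91 is the read/write area, and 0x90 is
	 * the read-only area carrying the 'PN' (part number) keyword we want.
	 * Each keyword is two bytes followed by a one-byte length.
	 */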
11803 /* Now parse and find the part number. */
Michael Chanaf2c6a42006-11-07 14:57:51 -080011804 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011805 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011806 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011807
11808 if (val == 0x82 || val == 0x91) {
11809 i = (i + 3 +
11810 (vpd_data[i + 1] +
11811 (vpd_data[i + 2] << 8)));
11812 continue;
11813 }
11814
11815 if (val != 0x90)
11816 goto out_not_found;
11817
11818 block_end = (i + 3 +
11819 (vpd_data[i + 1] +
11820 (vpd_data[i + 2] << 8)));
11821 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011822
11823 if (block_end > 256)
11824 goto out_not_found;
11825
11826 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011827 if (vpd_data[i + 0] == 'P' &&
11828 vpd_data[i + 1] == 'N') {
11829 int partno_len = vpd_data[i + 2];
11830
Michael Chanaf2c6a42006-11-07 14:57:51 -080011831 i += 3;
11832 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011833 goto out_not_found;
11834
11835 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011836 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011837
11838 /* Success. */
11839 return;
11840 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011841 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011842 }
11843
11844 /* Part number not found. */
11845 goto out_not_found;
11846 }
11847
11848out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11850 strcpy(tp->board_part_number, "BCM95906");
11851 else
11852 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011853}
11854
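/* Sanity-check a firmware image header in NVRAM: the first word must
 * carry the expected 0x0c000000 signature bits and the second word
 * must be zero.
 */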
Matt Carlson9c8a6202007-10-21 16:16:08 -070011855static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11856{
11857 u32 val;
11858
11859 if (tg3_nvram_read_swab(tp, offset, &val) ||
11860 (val & 0xfc000000) != 0x0c000000 ||
11861 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11862 val != 0)
11863 return 0;
11864
11865 return 1;
11866}
11867
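/* Build the firmware version string: read the bootcode version out of
 * the NVRAM image and, if ASF is enabled (and the APE is not), walk
 * the NVRAM directory for an ASF initialization entry and append its
 * version as well.
 */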
Michael Chanc4e65752006-03-20 22:29:32 -080011868static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11869{
11870 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011871 u32 ver_offset;
11872 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011873
11874 if (tg3_nvram_read_swab(tp, 0, &val))
11875 return;
11876
11877 if (val != TG3_EEPROM_MAGIC)
11878 return;
11879
11880 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11881 tg3_nvram_read_swab(tp, 0x4, &start))
11882 return;
11883
11884 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011885
11886 if (!tg3_fw_img_is_valid(tp, offset) ||
11887 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011888 return;
11889
Matt Carlson9c8a6202007-10-21 16:16:08 -070011890 offset = offset + ver_offset - start;
11891 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011892 __le32 v;
11893 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011894 return;
11895
Al Virob9fc7dc2007-12-17 22:59:57 -080011896 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011897 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011898
11899 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011900 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011901 return;
11902
11903 for (offset = TG3_NVM_DIR_START;
11904 offset < TG3_NVM_DIR_END;
11905 offset += TG3_NVM_DIRENT_SIZE) {
11906 if (tg3_nvram_read_swab(tp, offset, &val))
11907 return;
11908
11909 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11910 break;
11911 }
11912
11913 if (offset == TG3_NVM_DIR_END)
11914 return;
11915
11916 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11917 start = 0x08000000;
11918 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11919 return;
11920
11921 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11922 !tg3_fw_img_is_valid(tp, offset) ||
11923 tg3_nvram_read_swab(tp, offset + 8, &val))
11924 return;
11925
11926 offset += val - start;
11927
11928 bcnt = strlen(tp->fw_ver);
11929
11930 tp->fw_ver[bcnt++] = ',';
11931 tp->fw_ver[bcnt++] = ' ';
11932
11933 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011934 __le32 v;
11935 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011936 return;
11937
Al Virob9fc7dc2007-12-17 22:59:57 -080011938 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011939
Al Virob9fc7dc2007-12-17 22:59:57 -080011940 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11941 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011942 break;
11943 }
11944
Al Virob9fc7dc2007-12-17 22:59:57 -080011945 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11946 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011947 }
11948
11949 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011950}
11951
Michael Chan7544b092007-05-05 13:08:32 -070011952static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11953
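/* Work out everything about this particular chip that the rest of the
 * driver needs to know before touching the hardware: chip revision,
 * host bridge quirks, PCI/PCI-X/PCI Express mode, register and mailbox
 * access methods, NVRAM layout, PHY type, and the many workaround
 * flags keyed off all of the above.
 */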
Linus Torvalds1da177e2005-04-16 15:20:36 -070011954static int __devinit tg3_get_invariants(struct tg3 *tp)
11955{
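	/* Host bridges known to reorder posted writes to the mailbox
	 * registers; matched further down to set TG3_FLAG_MBOX_WRITE_REORDER.
	 */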
11956 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011957 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11958 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011959 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11960 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011961 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11962 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011963 { },
11964 };
11965 u32 misc_ctrl_reg;
11966 u32 cacheline_sz_reg;
11967 u32 pci_state_reg, grc_misc_cfg;
11968 u32 val;
11969 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011970 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011971
Linus Torvalds1da177e2005-04-16 15:20:36 -070011972 /* Force memory write invalidate off. If we leave it on,
11973 * then on 5700_BX chips we have to enable a workaround.
11974 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11975	 * to match the cacheline size. The Broadcom driver has this
11976	 * workaround but turns MWI off all the time, so it never uses
11977	 * it. This seems to suggest that the workaround is insufficient.
11978 */
11979 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11980 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11981 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11982
11983 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11984 * has the register indirect write enable bit set before
11985 * we try to access any of the MMIO registers. It is also
11986 * critical that the PCI-X hw workaround situation is decided
11987 * before that as well.
11988 */
11989 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11990 &misc_ctrl_reg);
11991
11992 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11993 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11995 u32 prod_id_asic_rev;
11996
11997 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11998 &prod_id_asic_rev);
11999 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
12000 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012001
Michael Chanff645be2005-04-21 17:09:53 -070012002 /* Wrong chip ID in 5752 A0. This code can be removed later
12003 * as A0 is not in production.
12004 */
12005 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12006 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12007
Michael Chan68929142005-08-09 20:17:14 -070012008 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12009 * we need to disable memory and use config. cycles
12010 * only to access all registers. The 5702/03 chips
12011 * can mistakenly decode the special cycles from the
12012 * ICH chipsets as memory write cycles, causing corruption
12013 * of register and memory space. Only certain ICH bridges
12014 * will drive special cycles with non-zero data during the
12015 * address phase which can fall within the 5703's address
12016 * range. This is not an ICH bug as the PCI spec allows
12017 * non-zero address during special cycles. However, only
12018 * these ICH bridges are known to drive non-zero addresses
12019 * during special cycles.
12020 *
12021 * Since special cycles do not cross PCI bridges, we only
12022 * enable this workaround if the 5703 is on the secondary
12023 * bus of these ICH bridges.
12024 */
12025 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12026 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12027 static struct tg3_dev_id {
12028 u32 vendor;
12029 u32 device;
12030 u32 rev;
12031 } ich_chipsets[] = {
12032 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12033 PCI_ANY_ID },
12034 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12035 PCI_ANY_ID },
12036 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12037 0xa },
12038 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12039 PCI_ANY_ID },
12040 { },
12041 };
12042 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12043 struct pci_dev *bridge = NULL;
12044
12045 while (pci_id->vendor != 0) {
12046 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12047 bridge);
12048 if (!bridge) {
12049 pci_id++;
12050 continue;
12051 }
12052 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070012053 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070012054 continue;
12055 }
12056 if (bridge->subordinate &&
12057 (bridge->subordinate->number ==
12058 tp->pdev->bus->number)) {
12059
12060 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12061 pci_dev_put(bridge);
12062 break;
12063 }
12064 }
12065 }
12066
Matt Carlson41588ba2008-04-19 18:12:33 -070012067 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12068 static struct tg3_dev_id {
12069 u32 vendor;
12070 u32 device;
12071 } bridge_chipsets[] = {
12072 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12073 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12074 { },
12075 };
12076 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12077 struct pci_dev *bridge = NULL;
12078
12079 while (pci_id->vendor != 0) {
12080 bridge = pci_get_device(pci_id->vendor,
12081 pci_id->device,
12082 bridge);
12083 if (!bridge) {
12084 pci_id++;
12085 continue;
12086 }
12087 if (bridge->subordinate &&
12088 (bridge->subordinate->number <=
12089 tp->pdev->bus->number) &&
12090 (bridge->subordinate->subordinate >=
12091 tp->pdev->bus->number)) {
12092 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12093 pci_dev_put(bridge);
12094 break;
12095 }
12096 }
12097 }
12098
Michael Chan4a29cc22006-03-19 13:21:12 -080012099 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12100	 * DMA addresses > 40-bit. This bridge may have additional
12101	 * 57xx devices behind it in some 4-port NIC designs, for example.
12102 * Any tg3 device found behind the bridge will also need the 40-bit
12103 * DMA workaround.
12104 */
Michael Chana4e2b342005-10-26 15:46:52 -070012105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12107 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012108 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012109 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012110 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012111 else {
12112 struct pci_dev *bridge = NULL;
12113
12114 do {
12115 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12116 PCI_DEVICE_ID_SERVERWORKS_EPB,
12117 bridge);
12118 if (bridge && bridge->subordinate &&
12119 (bridge->subordinate->number <=
12120 tp->pdev->bus->number) &&
12121 (bridge->subordinate->subordinate >=
12122 tp->pdev->bus->number)) {
12123 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12124 pci_dev_put(bridge);
12125 break;
12126 }
12127 } while (bridge);
12128 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012129
Linus Torvalds1da177e2005-04-16 15:20:36 -070012130 /* Initialize misc host control in PCI block. */
12131 tp->misc_host_ctrl |= (misc_ctrl_reg &
12132 MISC_HOST_CTRL_CHIPREV);
12133 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12134 tp->misc_host_ctrl);
12135
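	/* TG3PCI_CACHELINESZ is the standard PCI dword at config offset 0xc:
	 * cacheline size, latency timer, header type and BIST, one byte each.
	 */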
12136 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12137 &cacheline_sz_reg);
12138
12139 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12140 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12141 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12142 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12143
Michael Chan7544b092007-05-05 13:08:32 -070012144 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12145 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12146 tp->pdev_peer = tg3_find_peer(tp);
12147
John W. Linville2052da92005-04-21 16:56:08 -070012148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012154 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012156 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012157 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12158
John W. Linville1b440c562005-04-21 17:03:18 -070012159 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12160 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12161 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12162
Michael Chan5a6f3072006-03-20 22:28:05 -080012163 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012164 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12165 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12166 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12167 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12168 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12169 tp->pdev_peer == tp->pdev))
12170 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12171
Michael Chanaf36e6b2006-03-23 01:28:06 -080012172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012177 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012178 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012179 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012180 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012181 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012182 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12183 ASIC_REV_5750 &&
12184 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012185 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012186 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012187 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012188
Matt Carlsonf51f3562008-05-25 23:45:08 -070012189 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12190 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012191 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12192
Michael Chanc7835a72006-11-15 21:14:42 -080012193 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12194 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012195 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012196
12197 pcie_set_readrq(tp->pdev, 4096);
12198
Michael Chanc7835a72006-11-15 21:14:42 -080012199 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12200 u16 lnkctl;
12201
12202 pci_read_config_word(tp->pdev,
12203 pcie_cap + PCI_EXP_LNKCTL,
12204 &lnkctl);
12205 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12206 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12207 }
Matt Carlsonfcb389d2008-11-03 16:55:44 -080012208 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12209 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012210
Michael Chan399de502005-10-03 14:02:39 -070012211 /* If we have an AMD 762 or VIA K8T800 chipset, write
12212 * reordering to the mailbox registers done by the host
12213	 * controller can cause major trouble. We read back from
12214 * every mailbox register write to force the writes to be
12215 * posted to the chip in order.
12216 */
12217 if (pci_dev_present(write_reorder_chipsets) &&
12218 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12219 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12220
Linus Torvalds1da177e2005-04-16 15:20:36 -070012221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12222 tp->pci_lat_timer < 64) {
12223 tp->pci_lat_timer = 64;
12224
12225 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12226 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12227 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12228 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12229
12230 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12231 cacheline_sz_reg);
12232 }
12233
Matt Carlson9974a352007-10-07 23:27:28 -070012234 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12235 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12236 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12237 if (!tp->pcix_cap) {
12238 printk(KERN_ERR PFX "Cannot find PCI-X "
12239 "capability, aborting.\n");
12240 return -EIO;
12241 }
12242 }
12243
Linus Torvalds1da177e2005-04-16 15:20:36 -070012244 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12245 &pci_state_reg);
12246
Matt Carlson9974a352007-10-07 23:27:28 -070012247 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012248 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12249
12250 /* If this is a 5700 BX chipset, and we are in PCI-X
12251 * mode, enable register write workaround.
12252 *
12253 * The workaround is to use indirect register accesses
12254 * for all chip writes not to mailbox registers.
12255 */
12256 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12257 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012258
12259 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12260
12261			/* The chip can have its power management PCI config
12262 * space registers clobbered due to this bug.
12263 * So explicitly force the chip into D0 here.
12264 */
Matt Carlson9974a352007-10-07 23:27:28 -070012265 pci_read_config_dword(tp->pdev,
12266 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012267 &pm_reg);
12268 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12269 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012270 pci_write_config_dword(tp->pdev,
12271 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012272 pm_reg);
12273
12274 /* Also, force SERR#/PERR# in PCI command. */
12275 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12276 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12277 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12278 }
12279 }
12280
Michael Chan087fe252005-08-09 20:17:41 -070012281 /* 5700 BX chips need to have their TX producer index mailboxes
12282	 * written twice to work around a bug.
12283 */
12284 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12285 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12286
Linus Torvalds1da177e2005-04-16 15:20:36 -070012287 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12288 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12289 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12290 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12291
12292 /* Chip-specific fixup from Broadcom driver */
12293 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12294 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12295 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12296 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12297 }
12298
Michael Chan1ee582d2005-08-09 20:16:46 -070012299 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012300 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012301 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012302 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012303 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012304 tp->write32_tx_mbox = tg3_write32;
12305 tp->write32_rx_mbox = tg3_write32;
12306
12307 /* Various workaround register access methods */
12308 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12309 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012310 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12311 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12312 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12313 /*
12314		 * Back-to-back register writes can cause problems on these
12315		 * chips; the workaround is to read back all reg writes
12316 * except those to mailbox regs.
12317 *
12318 * See tg3_write_indirect_reg32().
12319 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012320 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012321 }
12322
Michael Chan1ee582d2005-08-09 20:16:46 -070012323
12324 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12325 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12326 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12327 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12328 tp->write32_rx_mbox = tg3_write_flush_reg32;
12329 }
Michael Chan20094932005-08-09 20:16:32 -070012330
Michael Chan68929142005-08-09 20:17:14 -070012331 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12332 tp->read32 = tg3_read_indirect_reg32;
12333 tp->write32 = tg3_write_indirect_reg32;
12334 tp->read32_mbox = tg3_read_indirect_mbox;
12335 tp->write32_mbox = tg3_write_indirect_mbox;
12336 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12337 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12338
12339 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012340 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012341
12342 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12343 pci_cmd &= ~PCI_COMMAND_MEMORY;
12344 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12345 }
Michael Chanb5d37722006-09-27 16:06:21 -070012346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12347 tp->read32_mbox = tg3_read32_mbox_5906;
12348 tp->write32_mbox = tg3_write32_mbox_5906;
12349 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12350 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12351 }
Michael Chan68929142005-08-09 20:17:14 -070012352
Michael Chanbbadf502006-04-06 21:46:34 -070012353 if (tp->write32 == tg3_write_indirect_reg32 ||
12354 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12355 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012356 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012357 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12358
Michael Chan7d0c41e2005-04-21 17:06:20 -070012359 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012360 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012361 * determined before calling tg3_set_power_state() so that
12362 * we know whether or not to switch out of Vaux power.
12363 * When the flag is set, it means that GPIO1 is used for eeprom
12364 * write protect and also implies that it is a LOM where GPIOs
12365 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012366 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012367 tg3_get_eeprom_hw_cfg(tp);
12368
Matt Carlson0d3031d2007-10-10 18:02:43 -070012369 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12370 /* Allow reads and writes to the
12371 * APE register and memory space.
12372 */
12373 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12374 PCISTATE_ALLOW_APE_SHMEM_WR;
12375 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12376 pci_state_reg);
12377 }
12378
Matt Carlson9936bcf2007-10-10 18:03:07 -070012379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012380 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012381 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012382 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12383
Michael Chan314fba32005-04-21 17:07:04 -070012384 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12385 * GPIO1 driven high will bring 5700's external PHY out of reset.
12386 * It is also used as eeprom write protect on LOMs.
12387 */
12388 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12389 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12390 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12391 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12392 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012393 /* Unused GPIO3 must be driven as output on 5752 because there
12394 * are no pull-up resistors on unused GPIO pins.
12395 */
12396 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12397 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012398
Michael Chanaf36e6b2006-03-23 01:28:06 -080012399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12400 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12401
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012402 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12403 /* Turn off the debug UART. */
12404 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12405 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12406 /* Keep VMain power. */
12407 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12408 GRC_LCLCTRL_GPIO_OUTPUT0;
12409 }
12410
Linus Torvalds1da177e2005-04-16 15:20:36 -070012411 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012412 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012413 if (err) {
12414 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12415 pci_name(tp->pdev));
12416 return err;
12417 }
12418
12419 /* 5700 B0 chips do not support checksumming correctly due
12420 * to hardware bugs.
12421 */
12422 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12423 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12424
Linus Torvalds1da177e2005-04-16 15:20:36 -070012425 /* Derive initial jumbo mode from MTU assigned in
12426 * ether_setup() via the alloc_etherdev() call
12427 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012428 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012429 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012430 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012431
12432 /* Determine WakeOnLan speed to use. */
12433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12434 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12435 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12436 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12437 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12438 } else {
12439 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12440 }
12441
12442 /* A few boards don't want Ethernet@WireSpeed phy feature */
12443 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12444 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12445 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012446 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012447 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012448 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012449 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12450
12451 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12452 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12453 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12454 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12455 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12456
Michael Chanc424cb22006-04-29 18:56:34 -070012457 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012459 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012462 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12463 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12464 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012465 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12466 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012467 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12468 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012469 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012471
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12473 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12474 tp->phy_otp = tg3_read_otp_phycfg(tp);
12475 if (tp->phy_otp == 0)
12476 tp->phy_otp = TG3_OTP_DEFAULT;
12477 }
12478
Matt Carlsonf51f3562008-05-25 23:45:08 -070012479 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012480 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12481 else
12482 tp->mi_mode = MAC_MI_MODE_BASE;
12483
Linus Torvalds1da177e2005-04-16 15:20:36 -070012484 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012485 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12486 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12487 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12488
Matt Carlson57e69832008-05-25 23:48:31 -070012489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12490 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12491
Matt Carlson158d7ab2008-05-29 01:37:54 -070012492 err = tg3_mdio_init(tp);
12493 if (err)
12494 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012495
12496 /* Initialize data/descriptor byte/word swapping. */
12497 val = tr32(GRC_MODE);
12498 val &= GRC_MODE_HOST_STACKUP;
12499 tw32(GRC_MODE, val | tp->grc_mode);
12500
12501 tg3_switch_clocks(tp);
12502
12503 /* Clear this out for sanity. */
12504 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12505
12506 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12507 &pci_state_reg);
12508 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12509 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12510 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12511
12512 if (chiprevid == CHIPREV_ID_5701_A0 ||
12513 chiprevid == CHIPREV_ID_5701_B0 ||
12514 chiprevid == CHIPREV_ID_5701_B2 ||
12515 chiprevid == CHIPREV_ID_5701_B5) {
12516 void __iomem *sram_base;
12517
12518 /* Write some dummy words into the SRAM status block
12519			 * area and see if it reads back correctly. If the return
12520			 * value is bad, force-enable the PCI-X workaround.
12521 */
12522 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12523
12524 writel(0x00000000, sram_base);
12525 writel(0x00000000, sram_base + 4);
12526 writel(0xffffffff, sram_base + 4);
12527 if (readl(sram_base) != 0x00000000)
12528 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12529 }
12530 }
12531
12532 udelay(50);
12533 tg3_nvram_init(tp);
12534
12535 grc_misc_cfg = tr32(GRC_MISC_CFG);
12536 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12537
Linus Torvalds1da177e2005-04-16 15:20:36 -070012538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12539 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12540 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12541 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12542
David S. Millerfac9b832005-05-18 22:46:34 -070012543 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12544 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12545 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12546 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12547 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12548 HOSTCC_MODE_CLRTICK_TXBD);
12549
12550 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12551 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12552 tp->misc_host_ctrl);
12553 }
12554
Matt Carlson3bda1252008-08-15 14:08:22 -070012555 /* Preserve the APE MAC_MODE bits */
12556 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12557 tp->mac_mode = tr32(MAC_MODE) |
12558 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12559 else
12560 tp->mac_mode = TG3_DEF_MAC_MODE;
12561
Linus Torvalds1da177e2005-04-16 15:20:36 -070012562 /* these are limited to 10/100 only */
12563 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12564 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12565 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12566 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12567 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12568 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12569 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12570 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12571 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012572 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12573 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012575 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12576
12577 err = tg3_phy_probe(tp);
12578 if (err) {
12579 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12580 pci_name(tp->pdev), err);
12581 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012582 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012583 }
12584
12585 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012586 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012587
12588 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12589 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12590 } else {
12591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12592 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12593 else
12594 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12595 }
12596
12597 /* 5700 {AX,BX} chips have a broken status block link
12598 * change bit implementation, so we must use the
12599 * status register in those cases.
12600 */
12601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12602 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12603 else
12604 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12605
12606	/* The led_ctrl is set during tg3_phy_probe; here we might
12607 * have to force the link status polling mechanism based
12608 * upon subsystem IDs.
12609 */
12610 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012612 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12613 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12614 TG3_FLAG_USE_LINKCHG_REG);
12615 }
12616
12617 /* For all SERDES we poll the MAC status register. */
12618 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12619 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12620 else
12621 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12622
Matt Carlsonad829262008-11-21 17:16:16 -080012623 tp->rx_offset = NET_IP_ALIGN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12625 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12626 tp->rx_offset = 0;
12627
Michael Chanf92905d2006-06-29 20:14:29 -070012628 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12629
12630 /* Increment the rx prod index on the rx std ring by at most
12631	 * 8 for these chips to work around hw errata.
12632 */
12633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12634 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12636 tp->rx_std_max_post = 8;
12637
Matt Carlson8ed5d972007-05-07 00:25:49 -070012638 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12639 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12640 PCIE_PWR_MGMT_L1_THRESH_MSK;
12641
Linus Torvalds1da177e2005-04-16 15:20:36 -070012642 return err;
12643}
12644
David S. Miller49b6e95f2007-03-29 01:38:42 -070012645#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012646static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12647{
12648 struct net_device *dev = tp->dev;
12649 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012650 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012651 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012652 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012653
David S. Miller49b6e95f2007-03-29 01:38:42 -070012654 addr = of_get_property(dp, "local-mac-address", &len);
12655 if (addr && len == 6) {
12656 memcpy(dev->dev_addr, addr, 6);
12657 memcpy(dev->perm_addr, dev->dev_addr, 6);
12658 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012659 }
12660 return -ENODEV;
12661}
12662
12663static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12664{
12665 struct net_device *dev = tp->dev;
12666
12667 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012668 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012669 return 0;
12670}
12671#endif
12672
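/* Discover the permanent MAC address.  Order of preference: OpenFirmware
 * property (SPARC only), the SRAM address mailbox, NVRAM at mac_offset,
 * and finally the MAC_ADDR_0 registers.
 */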
12673static int __devinit tg3_get_device_address(struct tg3 *tp)
12674{
12675 struct net_device *dev = tp->dev;
12676 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012677 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012678
David S. Miller49b6e95f2007-03-29 01:38:42 -070012679#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012680 if (!tg3_get_macaddr_sparc(tp))
12681 return 0;
12682#endif
12683
12684 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012685 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012686 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012687 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12688 mac_offset = 0xcc;
12689 if (tg3_nvram_lock(tp))
12690 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12691 else
12692 tg3_nvram_unlock(tp);
12693 }
Michael Chanb5d37722006-09-27 16:06:21 -070012694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12695 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012696
12697	/* First try to get it from the MAC address mailbox. */
12698 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12699 if ((hi >> 16) == 0x484b) {
12700 dev->dev_addr[0] = (hi >> 8) & 0xff;
12701 dev->dev_addr[1] = (hi >> 0) & 0xff;
12702
12703 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12704 dev->dev_addr[2] = (lo >> 24) & 0xff;
12705 dev->dev_addr[3] = (lo >> 16) & 0xff;
12706 dev->dev_addr[4] = (lo >> 8) & 0xff;
12707 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012708
Michael Chan008652b2006-03-27 23:14:53 -080012709 /* Some old bootcode may report a 0 MAC address in SRAM */
12710 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12711 }
12712 if (!addr_ok) {
12713 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012714 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012715 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12716 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12717 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12718 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12719 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12720 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12721 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12722 }
12723 /* Finally just fetch it out of the MAC control regs. */
12724 else {
12725 hi = tr32(MAC_ADDR_0_HIGH);
12726 lo = tr32(MAC_ADDR_0_LOW);
12727
12728 dev->dev_addr[5] = lo & 0xff;
12729 dev->dev_addr[4] = (lo >> 8) & 0xff;
12730 dev->dev_addr[3] = (lo >> 16) & 0xff;
12731 dev->dev_addr[2] = (lo >> 24) & 0xff;
12732 dev->dev_addr[1] = hi & 0xff;
12733 dev->dev_addr[0] = (hi >> 8) & 0xff;
12734 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012735 }
12736
12737 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012738#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012739 if (!tg3_get_default_macaddr_sparc(tp))
12740 return 0;
12741#endif
12742 return -EINVAL;
12743 }
John W. Linville2ff43692005-09-12 14:44:20 -070012744 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012745 return 0;
12746}
12747
David S. Miller59e6b432005-05-18 22:50:10 -070012748#define BOUNDARY_SINGLE_CACHELINE 1
12749#define BOUNDARY_MULTI_CACHELINE 2
12750
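/* Compute the DMA read/write boundary bits for DMA_RW_CTRL from the PCI
 * cache line size, the bus type (conventional PCI, PCI-X or PCI Express)
 * and an architecture-dependent single- vs. multi-cacheline preference.
 */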
12751static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12752{
12753 int cacheline_size;
12754 u8 byte;
12755 int goal;
12756
12757 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12758 if (byte == 0)
12759 cacheline_size = 1024;
12760 else
12761 cacheline_size = (int) byte * 4;
12762
12763 /* On 5703 and later chips, the boundary bits have no
12764 * effect.
12765 */
12766 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12767 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12768 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12769 goto out;
12770
12771#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12772 goal = BOUNDARY_MULTI_CACHELINE;
12773#else
12774#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12775 goal = BOUNDARY_SINGLE_CACHELINE;
12776#else
12777 goal = 0;
12778#endif
12779#endif
12780
12781 if (!goal)
12782 goto out;
12783
12784 /* PCI controllers on most RISC systems tend to disconnect
12785 * when a device tries to burst across a cache-line boundary.
12786 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12787 *
12788 * Unfortunately, for PCI-E there are only limited
12789 * write-side controls for this, and thus for reads
12790 * we will still get the disconnects. We'll also waste
12791 * these PCI cycles for both read and write for chips
12792 * other than 5700 and 5701 which do not implement the
12793 * boundary bits.
12794 */
12795 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12796 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12797 switch (cacheline_size) {
12798 case 16:
12799 case 32:
12800 case 64:
12801 case 128:
12802 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12803 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12804 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12805 } else {
12806 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12807 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12808 }
12809 break;
12810
12811 case 256:
12812 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12813 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12814 break;
12815
12816 default:
12817 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12818 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12819 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012820 }
David S. Miller59e6b432005-05-18 22:50:10 -070012821 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12822 switch (cacheline_size) {
12823 case 16:
12824 case 32:
12825 case 64:
12826 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12827 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12828 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12829 break;
12830 }
12831 /* fallthrough */
12832 case 128:
12833 default:
12834 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12835 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12836 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012837 }
David S. Miller59e6b432005-05-18 22:50:10 -070012838 } else {
12839 switch (cacheline_size) {
12840 case 16:
12841 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12842 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12843 DMA_RWCTRL_WRITE_BNDRY_16);
12844 break;
12845 }
12846 /* fallthrough */
12847 case 32:
12848 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12849 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12850 DMA_RWCTRL_WRITE_BNDRY_32);
12851 break;
12852 }
12853 /* fallthrough */
12854 case 64:
12855 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12856 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12857 DMA_RWCTRL_WRITE_BNDRY_64);
12858 break;
12859 }
12860 /* fallthrough */
12861 case 128:
12862 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12863 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12864 DMA_RWCTRL_WRITE_BNDRY_128);
12865 break;
12866 }
12867 /* fallthrough */
12868 case 256:
12869 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12870 DMA_RWCTRL_WRITE_BNDRY_256);
12871 break;
12872 case 512:
12873 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12874 DMA_RWCTRL_WRITE_BNDRY_512);
12875 break;
12876 case 1024:
12877 default:
12878 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12879 DMA_RWCTRL_WRITE_BNDRY_1024);
12880 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012881 }
David S. Miller59e6b432005-05-18 22:50:10 -070012882 }
12883
12884out:
12885 return val;
12886}
12887
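/* DMA one test buffer between host memory and NIC SRAM.  A single internal
 * buffer descriptor is written into the SRAM descriptor pool, the read or
 * write DMA engine is kicked (to_device selects the direction), and the
 * matching completion FIFO is polled for up to ~4ms.  Returns -ENODEV if
 * the transfer never completes.
 */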
Linus Torvalds1da177e2005-04-16 15:20:36 -070012888static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12889{
12890 struct tg3_internal_buffer_desc test_desc;
12891 u32 sram_dma_descs;
12892 int i, ret;
12893
12894 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12895
12896 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12897 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12898 tw32(RDMAC_STATUS, 0);
12899 tw32(WDMAC_STATUS, 0);
12900
12901 tw32(BUFMGR_MODE, 0);
12902 tw32(FTQ_RESET, 0);
12903
12904 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12905 test_desc.addr_lo = buf_dma & 0xffffffff;
12906 test_desc.nic_mbuf = 0x00002100;
12907 test_desc.len = size;
12908
12909 /*
12910	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12911 * the *second* time the tg3 driver was getting loaded after an
12912 * initial scan.
12913 *
12914 * Broadcom tells me:
12915 * ...the DMA engine is connected to the GRC block and a DMA
12916 * reset may affect the GRC block in some unpredictable way...
12917 * The behavior of resets to individual blocks has not been tested.
12918 *
12919 * Broadcom noted the GRC reset will also reset all sub-components.
12920 */
12921 if (to_device) {
12922 test_desc.cqid_sqid = (13 << 8) | 2;
12923
12924 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12925 udelay(40);
12926 } else {
12927 test_desc.cqid_sqid = (16 << 8) | 7;
12928
12929 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12930 udelay(40);
12931 }
12932 test_desc.flags = 0x00000005;
12933
12934 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12935 u32 val;
12936
12937 val = *(((u32 *)&test_desc) + i);
12938 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12939 sram_dma_descs + (i * sizeof(u32)));
12940 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12941 }
12942 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12943
12944 if (to_device) {
12945 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12946 } else {
12947 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12948 }
12949
12950 ret = -ENODEV;
12951 for (i = 0; i < 40; i++) {
12952 u32 val;
12953
12954 if (to_device)
12955 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12956 else
12957 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12958 if ((val & 0xffff) == sram_dma_descs) {
12959 ret = 0;
12960 break;
12961 }
12962
12963 udelay(100);
12964 }
12965
12966 return ret;
12967}
12968
David S. Millerded73402005-05-23 13:59:47 -070012969#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012970
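/* Choose an initial DMA_RW_CTRL value for this chip and bus, then, on
 * 5700/5701 only, run a write/read DMA loop over a coherent test buffer to
 * detect the write-DMA corruption bug.  If corruption is seen, the write
 * boundary is forced down to 16 bytes; otherwise the calculated boundary is
 * kept (unless the host chipset is on the known-bad list below).
 */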
12971static int __devinit tg3_test_dma(struct tg3 *tp)
12972{
12973 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012974 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012975 int ret;
12976
12977 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12978 if (!buf) {
12979 ret = -ENOMEM;
12980 goto out_nofree;
12981 }
12982
12983 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12984 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12985
David S. Miller59e6b432005-05-18 22:50:10 -070012986 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012987
12988 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12989 /* DMA read watermark not used on PCIE */
12990 tp->dma_rwctrl |= 0x00180000;
12991 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012994 tp->dma_rwctrl |= 0x003f0000;
12995 else
12996 tp->dma_rwctrl |= 0x003f000f;
12997 } else {
12998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13000 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080013001 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013002
Michael Chan4a29cc22006-03-19 13:21:12 -080013003 /* If the 5704 is behind the EPB bridge, we can
13004 * do the less restrictive ONE_DMA workaround for
13005 * better performance.
13006 */
13007 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13009 tp->dma_rwctrl |= 0x8000;
13010 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013011 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13012
Michael Chan49afdeb2007-02-13 12:17:03 -080013013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13014 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070013015 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080013016 tp->dma_rwctrl |=
13017 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13018 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13019 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070013020 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13021 /* 5780 always in PCIX mode */
13022 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070013023 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13024 /* 5714 always in PCIX mode */
13025 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013026 } else {
13027 tp->dma_rwctrl |= 0x001b000f;
13028 }
13029 }
13030
13031 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13032 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13033 tp->dma_rwctrl &= 0xfffffff0;
13034
13035 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13037 /* Remove this if it causes problems for some boards. */
13038 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13039
13040 /* On 5700/5701 chips, we need to set this bit.
13041 * Otherwise the chip will issue cacheline transactions
13042 * to streamable DMA memory with not all the byte
13043 * enables turned on. This is an error on several
13044 * RISC PCI controllers, in particular sparc64.
13045 *
13046 * On 5703/5704 chips, this bit has been reassigned
13047 * a different meaning. In particular, it is used
13048 * on those chips to enable a PCI-X workaround.
13049 */
13050 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13051 }
13052
13053 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13054
13055#if 0
13056 /* Unneeded, already done by tg3_get_invariants. */
13057 tg3_switch_clocks(tp);
13058#endif
13059
13060 ret = 0;
13061 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13062 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13063 goto out;
13064
David S. Miller59e6b432005-05-18 22:50:10 -070013065 /* It is best to perform DMA test with maximum write burst size
13066 * to expose the 5700/5701 write DMA bug.
13067 */
13068 saved_dma_rwctrl = tp->dma_rwctrl;
13069 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13070 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13071
Linus Torvalds1da177e2005-04-16 15:20:36 -070013072 while (1) {
13073 u32 *p = buf, i;
13074
13075 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13076 p[i] = i;
13077
13078 /* Send the buffer to the chip. */
13079 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13080 if (ret) {
13081 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13082 break;
13083 }
13084
13085#if 0
13086 /* validate data reached card RAM correctly. */
13087 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13088 u32 val;
13089 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13090 if (le32_to_cpu(val) != p[i]) {
13091 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13092 /* ret = -ENODEV here? */
13093 }
13094 p[i] = 0;
13095 }
13096#endif
13097 /* Now read it back. */
13098 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13099 if (ret) {
13100 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13101
13102 break;
13103 }
13104
13105 /* Verify it. */
13106 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13107 if (p[i] == i)
13108 continue;
13109
David S. Miller59e6b432005-05-18 22:50:10 -070013110 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13111 DMA_RWCTRL_WRITE_BNDRY_16) {
13112 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013113 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13114 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13115 break;
13116 } else {
13117 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13118 ret = -ENODEV;
13119 goto out;
13120 }
13121 }
13122
13123 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13124 /* Success. */
13125 ret = 0;
13126 break;
13127 }
13128 }
David S. Miller59e6b432005-05-18 22:50:10 -070013129 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13130 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013131 static struct pci_device_id dma_wait_state_chipsets[] = {
13132 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13133 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13134 { },
13135 };
13136
David S. Miller59e6b432005-05-18 22:50:10 -070013137		/* DMA test passed without adjusting the DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013138 * now look for chipsets that are known to expose the
13139 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013140 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013141 if (pci_dev_present(dma_wait_state_chipsets)) {
13142 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13143 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13144 }
13145 else
13146 /* Safe to use the calculated DMA boundary. */
13147 tp->dma_rwctrl = saved_dma_rwctrl;
13148
David S. Miller59e6b432005-05-18 22:50:10 -070013149 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013151
13152out:
13153 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13154out_nofree:
13155 return ret;
13156}
13157
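/* Default link configuration: autonegotiation enabled with all 10/100/1000
 * modes advertised; negotiated and saved values start out as INVALID.
 */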
13158static void __devinit tg3_init_link_config(struct tg3 *tp)
13159{
13160 tp->link_config.advertising =
13161 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13162 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13163 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13164 ADVERTISED_Autoneg | ADVERTISED_MII);
13165 tp->link_config.speed = SPEED_INVALID;
13166 tp->link_config.duplex = DUPLEX_INVALID;
13167 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013168 tp->link_config.active_speed = SPEED_INVALID;
13169 tp->link_config.active_duplex = DUPLEX_INVALID;
13170 tp->link_config.phy_is_low_power = 0;
13171 tp->link_config.orig_speed = SPEED_INVALID;
13172 tp->link_config.orig_duplex = DUPLEX_INVALID;
13173 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13174}
13175
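/* Buffer manager MBUF and DMA watermarks.  5705-class chips use their own
 * thresholds (with further 5906-specific overrides); jumbo-frame watermarks
 * are filled in for both classes.
 */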
13176static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13177{
Michael Chanfdfec172005-07-25 12:31:48 -070013178 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13179 tp->bufmgr_config.mbuf_read_dma_low_water =
13180 DEFAULT_MB_RDMA_LOW_WATER_5705;
13181 tp->bufmgr_config.mbuf_mac_rx_low_water =
13182 DEFAULT_MB_MACRX_LOW_WATER_5705;
13183 tp->bufmgr_config.mbuf_high_water =
13184 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13186 tp->bufmgr_config.mbuf_mac_rx_low_water =
13187 DEFAULT_MB_MACRX_LOW_WATER_5906;
13188 tp->bufmgr_config.mbuf_high_water =
13189 DEFAULT_MB_HIGH_WATER_5906;
13190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013191
Michael Chanfdfec172005-07-25 12:31:48 -070013192 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13193 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13194 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13195 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13196 tp->bufmgr_config.mbuf_high_water_jumbo =
13197 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13198 } else {
13199 tp->bufmgr_config.mbuf_read_dma_low_water =
13200 DEFAULT_MB_RDMA_LOW_WATER;
13201 tp->bufmgr_config.mbuf_mac_rx_low_water =
13202 DEFAULT_MB_MACRX_LOW_WATER;
13203 tp->bufmgr_config.mbuf_high_water =
13204 DEFAULT_MB_HIGH_WATER;
13205
13206 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13207 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13208 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13209 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13210 tp->bufmgr_config.mbuf_high_water_jumbo =
13211 DEFAULT_MB_HIGH_WATER_JUMBO;
13212 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013213
13214 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13215 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13216}
13217
13218static char * __devinit tg3_phy_string(struct tg3 *tp)
13219{
13220 switch (tp->phy_id & PHY_ID_MASK) {
13221 case PHY_ID_BCM5400: return "5400";
13222 case PHY_ID_BCM5401: return "5401";
13223 case PHY_ID_BCM5411: return "5411";
13224 case PHY_ID_BCM5701: return "5701";
13225 case PHY_ID_BCM5703: return "5703";
13226 case PHY_ID_BCM5704: return "5704";
13227 case PHY_ID_BCM5705: return "5705";
13228 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013229 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013230 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013231 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013232 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013233 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013234 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013235 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013236 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013237 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013238 case PHY_ID_BCM8002: return "8002/serdes";
13239 case 0: return "serdes";
13240 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013241 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013242}
13243
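/* Format a human-readable bus description (PCI/PCI-X/PCI Express, clock
 * speed and bus width) for the probe-time banner.
 */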
Michael Chanf9804dd2005-09-27 12:13:10 -070013244static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13245{
13246 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13247 strcpy(str, "PCI Express");
13248 return str;
13249 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13250 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13251
13252 strcpy(str, "PCIX:");
13253
13254 if ((clock_ctrl == 7) ||
13255 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13256 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13257 strcat(str, "133MHz");
13258 else if (clock_ctrl == 0)
13259 strcat(str, "33MHz");
13260 else if (clock_ctrl == 2)
13261 strcat(str, "50MHz");
13262 else if (clock_ctrl == 4)
13263 strcat(str, "66MHz");
13264 else if (clock_ctrl == 6)
13265 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013266 } else {
13267 strcpy(str, "PCI:");
13268 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13269 strcat(str, "66MHz");
13270 else
13271 strcat(str, "33MHz");
13272 }
13273 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13274 strcat(str, ":32-bit");
13275 else
13276 strcat(str, ":64-bit");
13277 return str;
13278}
13279
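/* Find the other PCI function of a dual-port device such as the 5704;
 * returns tp->pdev itself when the device is configured single-port.
 */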
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013280static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013281{
13282 struct pci_dev *peer;
13283 unsigned int func, devnr = tp->pdev->devfn & ~7;
13284
13285 for (func = 0; func < 8; func++) {
13286 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13287 if (peer && peer != tp->pdev)
13288 break;
13289 pci_dev_put(peer);
13290 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013291	/* 5704 can be configured in single-port mode; set peer to
13292 * tp->pdev in that case.
13293 */
13294 if (!peer) {
13295 peer = tp->pdev;
13296 return peer;
13297 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013298
13299 /*
13300 * We don't need to keep the refcount elevated; there's no way
13301	 * to remove one half of this device without removing the other.
13302 */
13303 pci_dev_put(peer);
13304
13305 return peer;
13306}
13307
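/* Fill tp->coal with the driver's default ethtool coalescing parameters.
 * Chips using CLRTICK tagged-status mode get adjusted tick values, and
 * 5705+ chips have the per-IRQ and statistics coalescing disabled.
 */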
David S. Miller15f98502005-05-18 22:49:26 -070013308static void __devinit tg3_init_coal(struct tg3 *tp)
13309{
13310 struct ethtool_coalesce *ec = &tp->coal;
13311
13312 memset(ec, 0, sizeof(*ec));
13313 ec->cmd = ETHTOOL_GCOALESCE;
13314 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13315 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13316 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13317 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13318 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13319 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13320 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13321 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13322 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13323
13324 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13325 HOSTCC_MODE_CLRTICK_TXBD)) {
13326 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13327 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13328 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13329 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13330 }
Michael Chand244c892005-07-05 14:42:33 -070013331
13332 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13333 ec->rx_coalesce_usecs_irq = 0;
13334 ec->tx_coalesce_usecs_irq = 0;
13335 ec->stats_block_coalesce_usecs = 0;
13336 }
David S. Miller15f98502005-05-18 22:49:26 -070013337}
13338
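/* Two net_device_ops tables that differ only in the transmit handler:
 * the chip list in tg3_init_one() selects tg3_start_xmit(); all other
 * chips go through tg3_start_xmit_dma_bug().
 */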
Stephen Hemminger7c7d64b2008-11-19 22:25:36 -080013339static const struct net_device_ops tg3_netdev_ops = {
13340 .ndo_open = tg3_open,
13341 .ndo_stop = tg3_close,
Stephen Hemminger00829822008-11-20 20:14:53 -080013342 .ndo_start_xmit = tg3_start_xmit,
13343 .ndo_get_stats = tg3_get_stats,
13344 .ndo_validate_addr = eth_validate_addr,
13345 .ndo_set_multicast_list = tg3_set_rx_mode,
13346 .ndo_set_mac_address = tg3_set_mac_addr,
13347 .ndo_do_ioctl = tg3_ioctl,
13348 .ndo_tx_timeout = tg3_tx_timeout,
13349 .ndo_change_mtu = tg3_change_mtu,
13350#if TG3_VLAN_TAG_USED
13351 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13352#endif
13353#ifdef CONFIG_NET_POLL_CONTROLLER
13354 .ndo_poll_controller = tg3_poll_controller,
13355#endif
13356};
13357
13358static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13359 .ndo_open = tg3_open,
13360 .ndo_stop = tg3_close,
13361 .ndo_start_xmit = tg3_start_xmit_dma_bug,
Stephen Hemminger7c7d64b2008-11-19 22:25:36 -080013362 .ndo_get_stats = tg3_get_stats,
13363 .ndo_validate_addr = eth_validate_addr,
13364 .ndo_set_multicast_list = tg3_set_rx_mode,
13365 .ndo_set_mac_address = tg3_set_mac_addr,
13366 .ndo_do_ioctl = tg3_ioctl,
13367 .ndo_tx_timeout = tg3_tx_timeout,
13368 .ndo_change_mtu = tg3_change_mtu,
13369#if TG3_VLAN_TAG_USED
13370 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13371#endif
13372#ifdef CONFIG_NET_POLL_CONTROLLER
13373 .ndo_poll_controller = tg3_poll_controller,
13374#endif
13375};
13376
Linus Torvalds1da177e2005-04-16 15:20:36 -070013377static int __devinit tg3_init_one(struct pci_dev *pdev,
13378 const struct pci_device_id *ent)
13379{
13380 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013381 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013382 struct net_device *dev;
13383 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013384 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013385 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013386 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013387
13388 if (tg3_version_printed++ == 0)
13389 printk(KERN_INFO "%s", version);
13390
13391 err = pci_enable_device(pdev);
13392 if (err) {
13393 printk(KERN_ERR PFX "Cannot enable PCI device, "
13394 "aborting.\n");
13395 return err;
13396 }
13397
Matt Carlson63532392008-11-03 16:49:57 -080013398 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013399 printk(KERN_ERR PFX "Cannot find proper PCI device "
13400 "base address, aborting.\n");
13401 err = -ENODEV;
13402 goto err_out_disable_pdev;
13403 }
13404
13405 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13406 if (err) {
13407 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13408 "aborting.\n");
13409 goto err_out_disable_pdev;
13410 }
13411
13412 pci_set_master(pdev);
13413
13414 /* Find power-management capability. */
13415 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13416 if (pm_cap == 0) {
13417 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13418 "aborting.\n");
13419 err = -EIO;
13420 goto err_out_free_res;
13421 }
13422
Linus Torvalds1da177e2005-04-16 15:20:36 -070013423 dev = alloc_etherdev(sizeof(*tp));
13424 if (!dev) {
13425 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13426 err = -ENOMEM;
13427 goto err_out_free_res;
13428 }
13429
Linus Torvalds1da177e2005-04-16 15:20:36 -070013430 SET_NETDEV_DEV(dev, &pdev->dev);
13431
Linus Torvalds1da177e2005-04-16 15:20:36 -070013432#if TG3_VLAN_TAG_USED
13433 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013434#endif
13435
13436 tp = netdev_priv(dev);
13437 tp->pdev = pdev;
13438 tp->dev = dev;
13439 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013440 tp->rx_mode = TG3_DEF_RX_MODE;
13441 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013442
Linus Torvalds1da177e2005-04-16 15:20:36 -070013443 if (tg3_debug > 0)
13444 tp->msg_enable = tg3_debug;
13445 else
13446 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13447
13448 /* The word/byte swap controls here control register access byte
13449 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13450 * setting below.
13451 */
13452 tp->misc_host_ctrl =
13453 MISC_HOST_CTRL_MASK_PCI_INT |
13454 MISC_HOST_CTRL_WORD_SWAP |
13455 MISC_HOST_CTRL_INDIR_ACCESS |
13456 MISC_HOST_CTRL_PCISTATE_RW;
13457
13458 /* The NONFRM (non-frame) byte/word swap controls take effect
13459 * on descriptor entries, anything which isn't packet data.
13460 *
13461 * The StrongARM chips on the board (one for tx, one for rx)
13462 * are running in big-endian mode.
13463 */
13464 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13465 GRC_MODE_WSWAP_NONFRM_DATA);
13466#ifdef __BIG_ENDIAN
13467 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13468#endif
13469 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013470 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013471 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013472
Matt Carlson63532392008-11-03 16:49:57 -080013473 dev->mem_start = pci_resource_start(pdev, BAR_0);
13474 tg3reg_len = pci_resource_len(pdev, BAR_0);
13475 dev->mem_end = dev->mem_start + tg3reg_len;
13476
13477 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013478 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013479 printk(KERN_ERR PFX "Cannot map device registers, "
13480 "aborting.\n");
13481 err = -ENOMEM;
13482 goto err_out_free_dev;
13483 }
13484
13485 tg3_init_link_config(tp);
13486
Linus Torvalds1da177e2005-04-16 15:20:36 -070013487 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13488 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13489 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13490
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013491 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013492 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013493 dev->watchdog_timeo = TG3_TX_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013494 dev->irq = pdev->irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013495
13496 err = tg3_get_invariants(tp);
13497 if (err) {
13498 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13499 "aborting.\n");
13500 goto err_out_iounmap;
13501 }
13502
Stephen Hemminger00829822008-11-20 20:14:53 -080013503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13504 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13509 dev->netdev_ops = &tg3_netdev_ops;
13510 else
13511 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13512
13513
Michael Chan4a29cc22006-03-19 13:21:12 -080013514 /* The EPB bridge inside 5714, 5715, and 5780 and any
13515 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013516 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13517 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13518 * do DMA address check in tg3_start_xmit().
13519 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013520 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13521 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13522 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013523 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13524#ifdef CONFIG_HIGHMEM
13525 dma_mask = DMA_64BIT_MASK;
13526#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013527 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013528 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13529
13530 /* Configure DMA attributes. */
13531 if (dma_mask > DMA_32BIT_MASK) {
13532 err = pci_set_dma_mask(pdev, dma_mask);
13533 if (!err) {
13534 dev->features |= NETIF_F_HIGHDMA;
13535 err = pci_set_consistent_dma_mask(pdev,
13536 persist_dma_mask);
13537 if (err < 0) {
13538 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13539 "DMA for consistent allocations\n");
13540 goto err_out_iounmap;
13541 }
13542 }
13543 }
13544 if (err || dma_mask == DMA_32BIT_MASK) {
13545 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13546 if (err) {
13547 printk(KERN_ERR PFX "No usable DMA configuration, "
13548 "aborting.\n");
13549 goto err_out_iounmap;
13550 }
13551 }
13552
Michael Chanfdfec172005-07-25 12:31:48 -070013553 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013554
Linus Torvalds1da177e2005-04-16 15:20:36 -070013555 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13556 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13557 }
13558 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13559 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13560 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013561 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013562 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13563 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13564 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013565 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013566 }
13567
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013568 /* TSO is on by default on chips that support hardware TSO.
13569 * Firmware TSO on older chips gives lower performance, so it
13570 * is off by default, but can be enabled using ethtool.
13571 */
Michael Chanb0026622006-07-03 19:42:14 -070013572 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013573 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013574 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13575 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013576 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13578 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13579 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013581 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013582 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013583
Linus Torvalds1da177e2005-04-16 15:20:36 -070013584
13585 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13586 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13587 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13588 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13589 tp->rx_pending = 63;
13590 }
13591
Linus Torvalds1da177e2005-04-16 15:20:36 -070013592 err = tg3_get_device_address(tp);
13593 if (err) {
13594 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13595 "aborting.\n");
13596 goto err_out_iounmap;
13597 }
13598
Matt Carlson0d3031d2007-10-10 18:02:43 -070013599 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013600 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013601 printk(KERN_ERR PFX "Cannot find proper PCI device "
13602 "base address for APE, aborting.\n");
13603 err = -ENODEV;
13604 goto err_out_iounmap;
13605 }
13606
Matt Carlson63532392008-11-03 16:49:57 -080013607 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013608 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013609 printk(KERN_ERR PFX "Cannot map APE registers, "
13610 "aborting.\n");
13611 err = -ENOMEM;
13612 goto err_out_iounmap;
13613 }
13614
13615 tg3_ape_lock_init(tp);
13616 }
13617
Matt Carlsonc88864d2007-11-12 21:07:01 -080013618 /*
13619	 * Reset chip in case UNDI or EFI driver did not shut down cleanly.
13620	 * Otherwise the DMA self test will enable WDMAC and we'll see (spurious)
13621 * pending DMA on the PCI bus at that point.
13622 */
13623 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13624 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13625 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13626 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13627 }
13628
13629 err = tg3_test_dma(tp);
13630 if (err) {
13631 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13632 goto err_out_apeunmap;
13633 }
13634
13635	/* Tigon3 can do IPv4 only... and some chips have buggy
13636 * checksumming.
13637 */
13638 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13639 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13640 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013645 dev->features |= NETIF_F_IPV6_CSUM;
13646
13647 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13648 } else
13649 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13650
13651 /* flow control autonegotiation is default behavior */
13652 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013653 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013654
13655 tg3_init_coal(tp);
13656
Michael Chanc49a1562006-12-17 17:07:29 -080013657 pci_set_drvdata(pdev, dev);
13658
Linus Torvalds1da177e2005-04-16 15:20:36 -070013659 err = register_netdev(dev);
13660 if (err) {
13661 printk(KERN_ERR PFX "Cannot register net device, "
13662 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013663 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013664 }
13665
Matt Carlsondf59c942008-11-03 16:52:56 -080013666 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013667 dev->name,
13668 tp->board_part_number,
13669 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013670 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013671 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013672
Matt Carlsondf59c942008-11-03 16:52:56 -080013673 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13674 printk(KERN_INFO
13675 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13676 tp->dev->name,
13677 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
Kay Sieversfb28ad32008-11-10 13:55:14 -080013678 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
Matt Carlsondf59c942008-11-03 16:52:56 -080013679 else
13680 printk(KERN_INFO
13681 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13682 tp->dev->name, tg3_phy_string(tp),
13683 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13684 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13685 "10/100/1000Base-T")),
13686 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13687
13688 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013689 dev->name,
13690 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13691 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13692 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13693 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013694 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013695 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13696 dev->name, tp->dma_rwctrl,
13697 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13698 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013699
13700 return 0;
13701
Matt Carlson0d3031d2007-10-10 18:02:43 -070013702err_out_apeunmap:
13703 if (tp->aperegs) {
13704 iounmap(tp->aperegs);
13705 tp->aperegs = NULL;
13706 }
13707
Linus Torvalds1da177e2005-04-16 15:20:36 -070013708err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013709 if (tp->regs) {
13710 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013711 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013712 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013713
13714err_out_free_dev:
13715 free_netdev(dev);
13716
13717err_out_free_res:
13718 pci_release_regions(pdev);
13719
13720err_out_disable_pdev:
13721 pci_disable_device(pdev);
13722 pci_set_drvdata(pdev, NULL);
13723 return err;
13724}
13725
13726static void __devexit tg3_remove_one(struct pci_dev *pdev)
13727{
13728 struct net_device *dev = pci_get_drvdata(pdev);
13729
13730 if (dev) {
13731 struct tg3 *tp = netdev_priv(dev);
13732
Michael Chan7faa0062006-02-02 17:29:28 -080013733 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013734
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013735 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13736 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013737 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013738 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013739
Linus Torvalds1da177e2005-04-16 15:20:36 -070013740 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013741 if (tp->aperegs) {
13742 iounmap(tp->aperegs);
13743 tp->aperegs = NULL;
13744 }
Michael Chan68929142005-08-09 20:17:14 -070013745 if (tp->regs) {
13746 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013747 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013748 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013749 free_netdev(dev);
13750 pci_release_regions(pdev);
13751 pci_disable_device(pdev);
13752 pci_set_drvdata(pdev, NULL);
13753 }
13754}
13755
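/* PCI suspend: save config space, detach the interface, stop the timer,
 * halt the chip and enter the target low-power state.  If the power
 * transition fails, the hardware is restarted so the device stays usable.
 */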
13756static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13757{
13758 struct net_device *dev = pci_get_drvdata(pdev);
13759 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013760 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013761 int err;
13762
Michael Chan3e0c95f2007-08-03 20:56:54 -070013763 /* PCI register 4 needs to be saved whether netif_running() or not.
13764 * MSI address and data need to be saved if using MSI and
13765 * netif_running().
13766 */
13767 pci_save_state(pdev);
13768
Linus Torvalds1da177e2005-04-16 15:20:36 -070013769 if (!netif_running(dev))
13770 return 0;
13771
Michael Chan7faa0062006-02-02 17:29:28 -080013772 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013773 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013774 tg3_netif_stop(tp);
13775
13776 del_timer_sync(&tp->timer);
13777
David S. Millerf47c11e2005-06-24 20:18:35 -070013778 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013779 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013780 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013781
13782 netif_device_detach(dev);
13783
David S. Millerf47c11e2005-06-24 20:18:35 -070013784 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013785 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013786 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013787 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013788
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013789 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13790
13791 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013792 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013793 int err2;
13794
David S. Millerf47c11e2005-06-24 20:18:35 -070013795 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013796
Michael Chan6a9eba12005-12-13 21:08:58 -080013797 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013798 err2 = tg3_restart_hw(tp, 1);
13799 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013800 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013801
13802 tp->timer.expires = jiffies + tp->timer_offset;
13803 add_timer(&tp->timer);
13804
13805 netif_device_attach(dev);
13806 tg3_netif_start(tp);
13807
Michael Chanb9ec6c12006-07-25 16:37:27 -070013808out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013809 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013810
13811 if (!err2)
13812 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013813 }
13814
13815 return err;
13816}
13817
13818static int tg3_resume(struct pci_dev *pdev)
13819{
13820 struct net_device *dev = pci_get_drvdata(pdev);
13821 struct tg3 *tp = netdev_priv(dev);
13822 int err;
13823
Michael Chan3e0c95f2007-08-03 20:56:54 -070013824 pci_restore_state(tp->pdev);
13825
Linus Torvalds1da177e2005-04-16 15:20:36 -070013826 if (!netif_running(dev))
13827 return 0;
13828
Michael Chanbc1c7562006-03-20 17:48:03 -080013829 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013830 if (err)
13831 return err;
13832
13833 netif_device_attach(dev);
13834
David S. Millerf47c11e2005-06-24 20:18:35 -070013835 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013836
Michael Chan6a9eba12005-12-13 21:08:58 -080013837 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013838 err = tg3_restart_hw(tp, 1);
13839 if (err)
13840 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013841
13842 tp->timer.expires = jiffies + tp->timer_offset;
13843 add_timer(&tp->timer);
13844
Linus Torvalds1da177e2005-04-16 15:20:36 -070013845 tg3_netif_start(tp);
13846
Michael Chanb9ec6c12006-07-25 16:37:27 -070013847out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013848 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013849
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013850 if (!err)
13851 tg3_phy_start(tp);
13852
Michael Chanb9ec6c12006-07-25 16:37:27 -070013853 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013854}
13855
13856static struct pci_driver tg3_driver = {
13857 .name = DRV_MODULE_NAME,
13858 .id_table = tg3_pci_tbl,
13859 .probe = tg3_init_one,
13860 .remove = __devexit_p(tg3_remove_one),
13861 .suspend = tg3_suspend,
13862 .resume = tg3_resume
13863};
13864
13865static int __init tg3_init(void)
13866{
Jeff Garzik29917622006-08-19 17:48:59 -040013867 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013868}
13869
13870static void __exit tg3_cleanup(void)
13871{
13872 pci_unregister_driver(&tg3_driver);
13873}
13874
13875module_init(tg3_init);
13876module_exit(tg3_cleanup);