/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.95"
#define DRV_MODULE_RELDATE	"November 3, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

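/* Shorthand register accessors.  tw32()/tr32() go through the access
 * methods selected at probe time (direct, indirect or flushing
 * variants); tw32_f() forces a read-back flush and tw32_wait_f()
 * additionally waits the requested number of usecs.
 */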
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

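/* The APE (Application Processing Engine) is the management firmware
 * processor present on some chips; the lock registers below arbitrate
 * access to resources shared between the driver and that firmware.
 */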
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

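/* MII register access goes through the MAC_MI_COM register: a frame
 * is written, then MI_COM_BUSY is polled (up to PHY_BUSY_LOOPS
 * iterations) until the transaction completes.
 */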
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}
849
Matt Carlson158d7ab2008-05-29 01:37:54 -0700850static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
851{
852 struct tg3 *tp = (struct tg3 *)bp->priv;
853 u32 val;
854
855 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
856 return -EAGAIN;
857
858 if (tg3_readphy(tp, reg, &val))
859 return -EIO;
860
861 return val;
862}
863
864static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
865{
866 struct tg3 *tp = (struct tg3 *)bp->priv;
867
868 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
869 return -EAGAIN;
870
871 if (tg3_writephy(tp, reg, val))
872 return -EIO;
873
874 return 0;
875}
876
877static int tg3_mdio_reset(struct mii_bus *bp)
878{
879 return 0;
880}
881
Matt Carlson9c61d6b2008-11-03 16:54:56 -0800882static void tg3_mdio_config_5785(struct tg3 *tp)
Matt Carlsona9daf362008-05-25 23:49:44 -0700883{
884 u32 val;
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800885 struct phy_device *phydev;
Matt Carlsona9daf362008-05-25 23:49:44 -0700886
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800887 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
888 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
889 case TG3_PHY_ID_BCM50610:
890 val = MAC_PHYCFG2_50610_LED_MODES;
891 break;
892 case TG3_PHY_ID_BCMAC131:
893 val = MAC_PHYCFG2_AC131_LED_MODES;
894 break;
895 case TG3_PHY_ID_RTL8211C:
896 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
897 break;
898 case TG3_PHY_ID_RTL8201E:
899 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
900 break;
901 default:
Matt Carlsona9daf362008-05-25 23:49:44 -0700902 return;
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800903 }
904
905 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
906 tw32(MAC_PHYCFG2, val);
907
908 val = tr32(MAC_PHYCFG1);
909 val &= ~MAC_PHYCFG1_RGMII_INT;
910 tw32(MAC_PHYCFG1, val);
911
912 return;
913 }
914
915 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
916 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
917 MAC_PHYCFG2_FMODE_MASK_MASK |
918 MAC_PHYCFG2_GMODE_MASK_MASK |
919 MAC_PHYCFG2_ACT_MASK_MASK |
920 MAC_PHYCFG2_QUAL_MASK_MASK |
921 MAC_PHYCFG2_INBAND_ENABLE;
922
923 tw32(MAC_PHYCFG2, val);
Matt Carlsona9daf362008-05-25 23:49:44 -0700924
925 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
926 MAC_PHYCFG1_RGMII_SND_STAT_EN);
927 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
928 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
929 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
930 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
931 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
932 }
933 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
934
Matt Carlsona9daf362008-05-25 23:49:44 -0700935 val = tr32(MAC_EXT_RGMII_MODE);
936 val &= ~(MAC_RGMII_MODE_RX_INT_B |
937 MAC_RGMII_MODE_RX_QUALITY |
938 MAC_RGMII_MODE_RX_ACTIVITY |
939 MAC_RGMII_MODE_RX_ENG_DET |
940 MAC_RGMII_MODE_TX_ENABLE |
941 MAC_RGMII_MODE_TX_LOWPWR |
942 MAC_RGMII_MODE_TX_RESET);
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800943 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
Matt Carlsona9daf362008-05-25 23:49:44 -0700944 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
945 val |= MAC_RGMII_MODE_RX_INT_B |
946 MAC_RGMII_MODE_RX_QUALITY |
947 MAC_RGMII_MODE_RX_ACTIVITY |
948 MAC_RGMII_MODE_RX_ENG_DET;
949 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
950 val |= MAC_RGMII_MODE_TX_ENABLE |
951 MAC_RGMII_MODE_TX_LOWPWR |
952 MAC_RGMII_MODE_TX_RESET;
953 }
954 tw32(MAC_EXT_RGMII_MODE, val);
955}
956
Matt Carlson158d7ab2008-05-29 01:37:54 -0700957static void tg3_mdio_start(struct tg3 *tp)
958{
959 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700960 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700961 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700962 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700963 }
964
965 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
966 tw32_f(MAC_MI_MODE, tp->mi_mode);
967 udelay(80);
Matt Carlsona9daf362008-05-25 23:49:44 -0700968
Matt Carlson9c61d6b2008-11-03 16:54:56 -0800969 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
971 tg3_mdio_config_5785(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700972}
973
974static void tg3_mdio_stop(struct tg3 *tp)
975{
976 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700977 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700978 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700979 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700980 }
981}
982
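/* Register an mdio bus with phylib, backed by tg3_readphy() and
 * tg3_writephy().  Only used when TG3_FLG3_USE_PHYLIB is set.
 */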
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

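/* Driver-to-firmware events are signalled by setting
 * GRC_RX_CPU_DRIVER_EVENT; the firmware clears the bit once it has
 * consumed the event, which tg3_wait_for_event_ack() polls for.
 */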
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

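/* Copy a snapshot of the MII link registers into the firmware command
 * mailbox so the management (ASF) firmware can track link state.
 */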
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

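/* Map the driver's TX/RX flow control settings onto the MII pause
 * advertisement bits (IEEE 802.3 Annex 28B); the _1000T variant is for
 * copper, _1000X for SerDes links.
 */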
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

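/* Resolve the active flow control configuration (from the autoneg
 * results when pause autonegotiation is enabled, otherwise from the
 * forced settings) and program the MAC RX/TX mode registers.
 */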
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

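/* Connect the MAC to its PHY through phylib and restrict the
 * advertised features to what the MAC interface mode supports.
 */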
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		phydev->supported &= (PHY_GBIT_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

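/* Enable or disable automatic MDI/MDI-X crossover in the PHY.  The
 * 5906 internal PHY uses shadowed EPHY test registers; other PHYs use
 * the auxiliary control register.
 */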
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001477static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1478{
1479 u32 phy;
1480
1481 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1482 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1483 return;
1484
1485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1486 u32 ephy;
1487
1488 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1489 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1490 ephy | MII_TG3_EPHY_SHADOW_EN);
1491 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1492 if (enable)
1493 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1494 else
1495 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1496 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1497 }
1498 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1499 }
1500 } else {
1501 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1502 MII_TG3_AUXCTL_SHDWSEL_MISC;
1503 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1504 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1505 if (enable)
1506 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1507 else
1508 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1509 phy |= MII_TG3_AUXCTL_MISC_WREN;
1510 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1511 }
1512 }
1513}
1514
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515static void tg3_phy_set_wirespeed(struct tg3 *tp)
1516{
1517 u32 val;
1518
1519 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1520 return;
1521
1522 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1523 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1524 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1525 (val | (1 << 15) | (1 << 4)));
1526}
1527
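/* Apply per-device PHY tuning values carried in OTP (one-time
 * programmable) memory.  The packed fields in tp->phy_otp are shifted
 * out and written to the corresponding DSP taps, with the SM_DSP clock
 * enabled only for the duration of the update.
 */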
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001528static void tg3_phy_apply_otp(struct tg3 *tp)
1529{
1530 u32 otp, phy;
1531
1532 if (!tp->phy_otp)
1533 return;
1534
1535 otp = tp->phy_otp;
1536
1537 /* Enable SM_DSP clock and tx 6dB coding. */
1538 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1539 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1540 MII_TG3_AUXCTL_ACTL_TX_6DB;
1541 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1542
1543 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1544 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1545 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1546
1547 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1548 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1549 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1550
1551 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1552 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1553 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1554
1555 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1556 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1557
1558 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1559 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1560
1561 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1562 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1563 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1564
1565 /* Turn off SM_DSP clock. */
1566 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1567 MII_TG3_AUXCTL_ACTL_TX_6DB;
1568 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1569}
1570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571static int tg3_wait_macro_done(struct tg3 *tp)
1572{
1573 int limit = 100;
1574
1575 while (limit--) {
1576 u32 tmp32;
1577
1578 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1579 if ((tmp32 & 0x1000) == 0)
1580 break;
1581 }
1582 }
1583	if (limit < 0)
1584 return -EBUSY;
1585
1586 return 0;
1587}
1588
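/* Write a known test pattern into each of the four DSP channels and
 * read it back.  A mismatch or a macro-done timeout sets *resetp so the
 * caller (tg3_phy_reset_5703_4_5) retries with a fresh BMCR reset.
 */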
1589static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1590{
1591 static const u32 test_pat[4][6] = {
1592 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1593 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1594 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1595 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1596 };
1597 int chan;
1598
1599 for (chan = 0; chan < 4; chan++) {
1600 int i;
1601
1602 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1603 (chan * 0x2000) | 0x0200);
1604 tg3_writephy(tp, 0x16, 0x0002);
1605
1606 for (i = 0; i < 6; i++)
1607 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1608 test_pat[chan][i]);
1609
1610 tg3_writephy(tp, 0x16, 0x0202);
1611 if (tg3_wait_macro_done(tp)) {
1612 *resetp = 1;
1613 return -EBUSY;
1614 }
1615
1616 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1617 (chan * 0x2000) | 0x0200);
1618 tg3_writephy(tp, 0x16, 0x0082);
1619 if (tg3_wait_macro_done(tp)) {
1620 *resetp = 1;
1621 return -EBUSY;
1622 }
1623
1624 tg3_writephy(tp, 0x16, 0x0802);
1625 if (tg3_wait_macro_done(tp)) {
1626 *resetp = 1;
1627 return -EBUSY;
1628 }
1629
1630 for (i = 0; i < 6; i += 2) {
1631 u32 low, high;
1632
1633 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1634 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1635 tg3_wait_macro_done(tp)) {
1636 *resetp = 1;
1637 return -EBUSY;
1638 }
1639 low &= 0x7fff;
1640 high &= 0x000f;
1641 if (low != test_pat[chan][i] ||
1642 high != test_pat[chan][i+1]) {
1643 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1644 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1645 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1646
1647 return -EBUSY;
1648 }
1649 }
1650 }
1651
1652 return 0;
1653}
1654
1655static int tg3_phy_reset_chanpat(struct tg3 *tp)
1656{
1657 int chan;
1658
1659 for (chan = 0; chan < 4; chan++) {
1660 int i;
1661
1662 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1663 (chan * 0x2000) | 0x0200);
1664 tg3_writephy(tp, 0x16, 0x0002);
1665 for (i = 0; i < 6; i++)
1666 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1667 tg3_writephy(tp, 0x16, 0x0202);
1668 if (tg3_wait_macro_done(tp))
1669 return -EBUSY;
1670 }
1671
1672 return 0;
1673}
1674
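/* PHY reset workaround used on the 5703/5704/5705: force 1000Mbps
 * full-duplex master mode, validate the DSP channels with a test
 * pattern, and retry with another BMCR reset (up to 10 times) if the
 * pattern check fails.
 */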
1675static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1676{
1677 u32 reg32, phy9_orig;
1678 int retries, do_phy_reset, err;
1679
1680 retries = 10;
1681 do_phy_reset = 1;
1682 do {
1683 if (do_phy_reset) {
1684 err = tg3_bmcr_reset(tp);
1685 if (err)
1686 return err;
1687 do_phy_reset = 0;
1688 }
1689
1690 /* Disable transmitter and interrupt. */
1691 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1692 continue;
1693
1694 reg32 |= 0x3000;
1695 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1696
1697 /* Set full-duplex, 1000 mbps. */
1698 tg3_writephy(tp, MII_BMCR,
1699 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1700
1701 /* Set to master mode. */
1702 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1703 continue;
1704
1705 tg3_writephy(tp, MII_TG3_CTRL,
1706 (MII_TG3_CTRL_AS_MASTER |
1707 MII_TG3_CTRL_ENABLE_AS_MASTER));
1708
1709 /* Enable SM_DSP_CLOCK and 6dB. */
1710 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1711
1712 /* Block the PHY control access. */
1713 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1714 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1715
1716 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1717 if (!err)
1718 break;
1719 } while (--retries);
1720
1721 err = tg3_phy_reset_chanpat(tp);
1722 if (err)
1723 return err;
1724
1725 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1727
1728 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1729 tg3_writephy(tp, 0x16, 0x0000);
1730
1731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1732 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1733 /* Set Extended packet length bit for jumbo frames */
1734 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1735 }
1736 else {
1737 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1738 }
1739
1740 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1741
1742 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1743 reg32 &= ~0x3000;
1744 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1745 } else if (!err)
1746 err = -EBUSY;
1747
1748 return err;
1749}
1750
1751/* This will reset the tigon3 PHY and apply any chip-specific
1752 * workarounds that must follow a PHY reset.
1753 */
1754static int tg3_phy_reset(struct tg3 *tp)
1755{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001756 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 u32 phy_status;
1758 int err;
1759
Michael Chan60189dd2006-12-17 17:08:07 -08001760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1761 u32 val;
1762
1763 val = tr32(GRC_MISC_CFG);
1764 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1765 udelay(40);
1766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1768 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1769 if (err != 0)
1770 return -EBUSY;
1771
Michael Chanc8e1e822006-04-29 18:55:17 -07001772 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1773 netif_carrier_off(tp->dev);
1774 tg3_link_report(tp);
1775 }
1776
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1780 err = tg3_phy_reset_5703_4_5(tp);
1781 if (err)
1782 return err;
1783 goto out;
1784 }
1785
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001786 cpmuctrl = 0;
1787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1788 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1789 cpmuctrl = tr32(TG3_CPMU_CTRL);
1790 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1791 tw32(TG3_CPMU_CTRL,
1792 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1793 }
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 err = tg3_bmcr_reset(tp);
1796 if (err)
1797 return err;
1798
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001799 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1800 u32 phy;
1801
1802 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1803 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1804
1805 tw32(TG3_CPMU_CTRL, cpmuctrl);
1806 }
1807
Matt Carlsonbcb37f62008-11-03 16:52:09 -08001808 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1809 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001810 u32 val;
1811
1812 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1813 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1814 CPMU_LSPD_1000MB_MACCLK_12_5) {
1815 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1816 udelay(40);
1817 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1818 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001819
1820 /* Disable GPHY autopowerdown. */
1821 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1822 MII_TG3_MISC_SHDW_WREN |
1823 MII_TG3_MISC_SHDW_APD_SEL |
1824 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001825 }
1826
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001827 tg3_phy_apply_otp(tp);
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829out:
1830 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1831 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1832 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1834 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1835 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1836 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1837 }
1838 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1839 tg3_writephy(tp, 0x1c, 0x8d68);
1840 tg3_writephy(tp, 0x1c, 0x8d68);
1841 }
1842 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1843 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1845 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1846 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1847 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1848 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1849 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1850 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1851	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1853 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1854 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001855 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1856 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1857 tg3_writephy(tp, MII_TG3_TEST1,
1858 MII_TG3_TEST1_TRIM_EN | 0x4);
1859 } else
1860 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001861 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1862 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 /* Set Extended packet length bit (bit 14) on all chips that */
1864 /* support jumbo frames */
1865 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1866 /* Cannot do read-modify-write on 5401 */
1867 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001868 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 u32 phy_reg;
1870
1871 /* Set bit 14 with read-modify-write to preserve other bits */
1872 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1873 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1874 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1875 }
1876
1877 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1878 * jumbo frames transmission.
1879 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001880 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 u32 phy_reg;
1882
1883 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1884 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1885 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1886 }
1887
Michael Chan715116a2006-09-27 16:09:25 -07001888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001889 /* adjust output voltage */
1890 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001891 }
1892
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001893 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 tg3_phy_set_wirespeed(tp);
1895 return 0;
1896}
1897
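/* Switch the GPIO-controlled auxiliary (Vaux) power.  On dual-port
 * 5704/5714 devices the GPIOs are shared, so the peer port's WOL/ASF
 * state is checked before they are touched; the 5761 swaps GPIO 0 and
 * GPIO 2, and 5753-class chips cannot drive GPIO 2 at all.
 */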
1898static void tg3_frob_aux_power(struct tg3 *tp)
1899{
1900 struct tg3 *tp_peer = tp;
1901
Michael Chan9d26e212006-12-07 00:21:14 -08001902 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 return;
1904
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001905 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1906 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1907 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001909 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001910 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001911 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001912 tp_peer = tp;
1913 else
1914 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
1917 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001918 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1919 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1920 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001923 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1924 (GRC_LCLCTRL_GPIO_OE0 |
1925 GRC_LCLCTRL_GPIO_OE1 |
1926 GRC_LCLCTRL_GPIO_OE2 |
1927 GRC_LCLCTRL_GPIO_OUTPUT0 |
1928 GRC_LCLCTRL_GPIO_OUTPUT1),
1929 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001930 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1931 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1932 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1933 GRC_LCLCTRL_GPIO_OE1 |
1934 GRC_LCLCTRL_GPIO_OE2 |
1935 GRC_LCLCTRL_GPIO_OUTPUT0 |
1936 GRC_LCLCTRL_GPIO_OUTPUT1 |
1937 tp->grc_local_ctrl;
1938 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1939
1940 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1941 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1942
1943 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1944 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 } else {
1946 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001947 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948
1949 if (tp_peer != tp &&
1950 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1951 return;
1952
Michael Chandc56b7d2005-12-19 16:26:28 -08001953 /* Workaround to prevent overdrawing Amps. */
1954 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1955 ASIC_REV_5714) {
1956 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001957 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1958 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001959 }
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 /* On 5753 and variants, GPIO2 cannot be used. */
1962 no_gpio2 = tp->nic_sram_data_cfg &
1963 NIC_SRAM_DATA_CFG_NO_GPIO2;
1964
Michael Chandc56b7d2005-12-19 16:26:28 -08001965 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 GRC_LCLCTRL_GPIO_OE1 |
1967 GRC_LCLCTRL_GPIO_OE2 |
1968 GRC_LCLCTRL_GPIO_OUTPUT1 |
1969 GRC_LCLCTRL_GPIO_OUTPUT2;
1970 if (no_gpio2) {
1971 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1972 GRC_LCLCTRL_GPIO_OUTPUT2);
1973 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001974 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1975 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1978
Michael Chanb401e9e2005-12-19 16:27:04 -08001979 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1980 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
1982 if (!no_gpio2) {
1983 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001984 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1985 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 }
1987 }
1988 } else {
1989 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1990 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1991 if (tp_peer != tp &&
1992 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1993 return;
1994
Michael Chanb401e9e2005-12-19 16:27:04 -08001995 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1996 (GRC_LCLCTRL_GPIO_OE1 |
1997 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
Michael Chanb401e9e2005-12-19 16:27:04 -08001999 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2000 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Michael Chanb401e9e2005-12-19 16:27:04 -08002002 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2003 (GRC_LCLCTRL_GPIO_OE1 |
2004 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 }
2006 }
2007}
2008
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002009static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2010{
2011 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2012 return 1;
2013 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2014 if (speed != SPEED_10)
2015 return 1;
2016 } else if (speed == SPEED_10)
2017 return 1;
2018
2019 return 0;
2020}
2021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022static int tg3_setup_phy(struct tg3 *, int);
2023
2024#define RESET_KIND_SHUTDOWN 0
2025#define RESET_KIND_INIT 1
2026#define RESET_KIND_SUSPEND 2
2027
2028static void tg3_write_sig_post_reset(struct tg3 *, int);
2029static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08002030static int tg3_nvram_lock(struct tg3 *);
2031static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
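/* Power down the PHY before suspend.  Serdes devices only need the
 * SG_DIG/serdes configuration updated, the 5906 gates its EPHY through
 * GRC_MISC_CFG_EPHY_IDDQ, and chips with known power-down bugs (5700,
 * 5704, 5780 with MII serdes) are left alone; everything else gets
 * BMCR_PDOWN.
 */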
Matt Carlson0a459aa2008-11-03 16:54:15 -08002033static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
Michael Chan15c3b692006-03-22 01:06:52 -08002034{
Matt Carlsonce057f02007-11-12 21:08:03 -08002035 u32 val;
2036
Michael Chan51297242007-02-13 12:17:57 -08002037 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2039 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2040 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2041
2042 sg_dig_ctrl |=
2043 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2044 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2045 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2046 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002047 return;
Michael Chan51297242007-02-13 12:17:57 -08002048 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002049
Michael Chan60189dd2006-12-17 17:08:07 -08002050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08002051 tg3_bmcr_reset(tp);
2052 val = tr32(GRC_MISC_CFG);
2053 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2054 udelay(40);
2055 return;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002056 } else if (do_low_power) {
Michael Chan715116a2006-09-27 16:09:25 -07002057 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2058 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002059
2060 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2061 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2062 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2063 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2064 MII_TG3_AUXCTL_PCTL_VREG_11V);
Michael Chan715116a2006-09-27 16:09:25 -07002065 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002066
Michael Chan15c3b692006-03-22 01:06:52 -08002067 /* The PHY should not be powered down on some chips because
2068 * of bugs.
2069 */
2070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2071 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2072 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2073 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2074 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002075
Matt Carlsonbcb37f62008-11-03 16:52:09 -08002076 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2077 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002078 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2079 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2080 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2081 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2082 }
2083
Michael Chan15c3b692006-03-22 01:06:52 -08002084 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2085}
2086
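/* The six MAC address bytes are packed into a 16-bit high word and a
 * 32-bit low word and written to all four MAC_ADDR slots (slot 1 can be
 * skipped).  For example (illustrative only), 00:10:18:aa:bb:cc gives
 * addr_high = 0x00000010 and addr_low = 0x18aabbcc.  The byte sum also
 * seeds the transmit backoff generator.
 */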
Matt Carlson3f007892008-11-03 16:51:36 -08002087/* tp->lock is held. */
2088static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2089{
2090 u32 addr_high, addr_low;
2091 int i;
2092
2093 addr_high = ((tp->dev->dev_addr[0] << 8) |
2094 tp->dev->dev_addr[1]);
2095 addr_low = ((tp->dev->dev_addr[2] << 24) |
2096 (tp->dev->dev_addr[3] << 16) |
2097 (tp->dev->dev_addr[4] << 8) |
2098 (tp->dev->dev_addr[5] << 0));
2099 for (i = 0; i < 4; i++) {
2100 if (i == 1 && skip_mac_1)
2101 continue;
2102 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2103 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2104 }
2105
2106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2108 for (i = 0; i < 12; i++) {
2109 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2110 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2111 }
2112 }
2113
2114 addr_high = (tp->dev->dev_addr[0] +
2115 tp->dev->dev_addr[1] +
2116 tp->dev->dev_addr[2] +
2117 tp->dev->dev_addr[3] +
2118 tp->dev->dev_addr[4] +
2119 tp->dev->dev_addr[5]) &
2120 TX_BACKOFF_SEED_MASK;
2121 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2122}
2123
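/* Transition the device between PCI power states.  D0 re-enables the
 * chip and switches out of Vaux; D1/D2/D3hot take the low-power path:
 * save the current link settings, restrict the advertisement (honoring
 * the WOL speed flag), program the WOL mailbox and MAC mode, gate the
 * core clocks, power down the PHY when nothing needs it, and finally
 * call pci_set_power_state().
 */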
Michael Chanbc1c7562006-03-20 17:48:03 -08002124static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125{
2126 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002127 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
2129 /* Make sure register accesses (indirect or otherwise)
2130 * will function correctly.
2131 */
2132 pci_write_config_dword(tp->pdev,
2133 TG3PCI_MISC_HOST_CTRL,
2134 tp->misc_host_ctrl);
2135
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002137 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002138 pci_enable_wake(tp->pdev, state, false);
2139 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002140
Michael Chan9d26e212006-12-07 00:21:14 -08002141 /* Switch out of Vaux if it is a NIC */
2142 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002143 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
2145 return 0;
2146
Michael Chanbc1c7562006-03-20 17:48:03 -08002147 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002148 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002149 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 break;
2151
2152 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002153 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2154 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002156 }
Matt Carlson5e7dfd02008-11-21 17:18:16 -08002157
2158 /* Restore the CLKREQ setting. */
2159 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2160 u16 lnkctl;
2161
2162 pci_read_config_word(tp->pdev,
2163 tp->pcie_cap + PCI_EXP_LNKCTL,
2164 &lnkctl);
2165 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2166 pci_write_config_word(tp->pdev,
2167 tp->pcie_cap + PCI_EXP_LNKCTL,
2168 lnkctl);
2169 }
2170
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2172 tw32(TG3PCI_MISC_HOST_CTRL,
2173 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2174
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002175 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2176 device_may_wakeup(&tp->pdev->dev) &&
2177 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2178
Matt Carlsondd477002008-05-25 23:45:58 -07002179 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002180 do_low_power = false;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002181 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2182 !tp->link_config.phy_is_low_power) {
2183 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002184 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002185
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002186 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002187
2188 tp->link_config.phy_is_low_power = 1;
2189
2190 tp->link_config.orig_speed = phydev->speed;
2191 tp->link_config.orig_duplex = phydev->duplex;
2192 tp->link_config.orig_autoneg = phydev->autoneg;
2193 tp->link_config.orig_advertising = phydev->advertising;
2194
2195 advertising = ADVERTISED_TP |
2196 ADVERTISED_Pause |
2197 ADVERTISED_Autoneg |
2198 ADVERTISED_10baseT_Half;
2199
2200 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002201 device_should_wake) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002202 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2203 advertising |=
2204 ADVERTISED_100baseT_Half |
2205 ADVERTISED_100baseT_Full |
2206 ADVERTISED_10baseT_Full;
2207 else
2208 advertising |= ADVERTISED_10baseT_Full;
2209 }
2210
2211 phydev->advertising = advertising;
2212
2213 phy_start_aneg(phydev);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002214
2215 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2216 if (phyid != TG3_PHY_ID_BCMAC131) {
2217 phyid &= TG3_PHY_OUI_MASK;
2218	if (phyid == TG3_PHY_OUI_1 ||
2219	    phyid == TG3_PHY_OUI_2 ||
2220	    phyid == TG3_PHY_OUI_3)
2221 do_low_power = true;
2222 }
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002223 }
Matt Carlsondd477002008-05-25 23:45:58 -07002224 } else {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002225 do_low_power = false;
2226
Matt Carlsondd477002008-05-25 23:45:58 -07002227 if (tp->link_config.phy_is_low_power == 0) {
2228 tp->link_config.phy_is_low_power = 1;
2229 tp->link_config.orig_speed = tp->link_config.speed;
2230 tp->link_config.orig_duplex = tp->link_config.duplex;
2231 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2232 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
Matt Carlsondd477002008-05-25 23:45:58 -07002234 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2235 tp->link_config.speed = SPEED_10;
2236 tp->link_config.duplex = DUPLEX_HALF;
2237 tp->link_config.autoneg = AUTONEG_ENABLE;
2238 tg3_setup_phy(tp, 0);
2239 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 }
2241
Matt Carlson3f007892008-11-03 16:51:36 -08002242 __tg3_set_mac_addr(tp, 0);
2243
Michael Chanb5d37722006-09-27 16:06:21 -07002244 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2245 u32 val;
2246
2247 val = tr32(GRC_VCPU_EXT_CTRL);
2248 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2249 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002250 int i;
2251 u32 val;
2252
2253 for (i = 0; i < 200; i++) {
2254 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2255 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2256 break;
2257 msleep(1);
2258 }
2259 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002260 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2261 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2262 WOL_DRV_STATE_SHUTDOWN |
2263 WOL_DRV_WOL |
2264 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002265
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002266 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 u32 mac_mode;
2268
2269 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002270 if (do_low_power) {
Matt Carlsondd477002008-05-25 23:45:58 -07002271 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2272 udelay(40);
2273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274
Michael Chan3f7045c2006-09-27 16:02:29 -07002275 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2276 mac_mode = MAC_MODE_PORT_MODE_GMII;
2277 else
2278 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002280 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2281 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2282 ASIC_REV_5700) {
2283 u32 speed = (tp->tg3_flags &
2284 TG3_FLAG_WOL_SPEED_100MB) ?
2285 SPEED_100 : SPEED_10;
2286 if (tg3_5700_link_polarity(tp, speed))
2287 mac_mode |= MAC_MODE_LINK_POLARITY;
2288 else
2289 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 } else {
2292 mac_mode = MAC_MODE_PORT_MODE_TBI;
2293 }
2294
John W. Linvillecbf46852005-04-21 17:01:29 -07002295 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 tw32(MAC_LED_CTRL, tp->led_ctrl);
2297
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002298 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2299 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2300 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2301 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2302 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2303 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304
Matt Carlson3bda1252008-08-15 14:08:22 -07002305 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2306 mac_mode |= tp->mac_mode &
2307 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2308 if (mac_mode & MAC_MODE_APE_TX_EN)
2309 mac_mode |= MAC_MODE_TDE_ENABLE;
2310 }
2311
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 tw32_f(MAC_MODE, mac_mode);
2313 udelay(100);
2314
2315 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2316 udelay(10);
2317 }
2318
2319 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2320 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2322 u32 base_val;
2323
2324 base_val = tp->pci_clock_ctrl;
2325 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2326 CLOCK_CTRL_TXCLK_DISABLE);
2327
Michael Chanb401e9e2005-12-19 16:27:04 -08002328 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2329 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002330 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002331 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002332 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002333 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002334 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2336 u32 newbits1, newbits2;
2337
2338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2340 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2341 CLOCK_CTRL_TXCLK_DISABLE |
2342 CLOCK_CTRL_ALTCLK);
2343 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2344 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2345 newbits1 = CLOCK_CTRL_625_CORE;
2346 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2347 } else {
2348 newbits1 = CLOCK_CTRL_ALTCLK;
2349 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2350 }
2351
Michael Chanb401e9e2005-12-19 16:27:04 -08002352 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2353 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
Michael Chanb401e9e2005-12-19 16:27:04 -08002355 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2356 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357
2358 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2359 u32 newbits3;
2360
2361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2362 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2363 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2364 CLOCK_CTRL_TXCLK_DISABLE |
2365 CLOCK_CTRL_44MHZ_CORE);
2366 } else {
2367 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2368 }
2369
Michael Chanb401e9e2005-12-19 16:27:04 -08002370 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2371 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 }
2373 }
2374
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002375 if (!(device_should_wake) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002376 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2377 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson0a459aa2008-11-03 16:54:15 -08002378 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08002379
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 tg3_frob_aux_power(tp);
2381
2382 /* Workaround for unstable PLL clock */
2383 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2384 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2385 u32 val = tr32(0x7d00);
2386
2387 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2388 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002389 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002390 int err;
2391
2392 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002394 if (!err)
2395 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 }
2398
Michael Chanbbadf502006-04-06 21:46:34 -07002399 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2400
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002401 if (device_should_wake)
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002402 pci_enable_wake(tp->pdev, state, true);
2403
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002405 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 return 0;
2408}
2409
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2411{
2412 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2413 case MII_TG3_AUX_STAT_10HALF:
2414 *speed = SPEED_10;
2415 *duplex = DUPLEX_HALF;
2416 break;
2417
2418 case MII_TG3_AUX_STAT_10FULL:
2419 *speed = SPEED_10;
2420 *duplex = DUPLEX_FULL;
2421 break;
2422
2423 case MII_TG3_AUX_STAT_100HALF:
2424 *speed = SPEED_100;
2425 *duplex = DUPLEX_HALF;
2426 break;
2427
2428 case MII_TG3_AUX_STAT_100FULL:
2429 *speed = SPEED_100;
2430 *duplex = DUPLEX_FULL;
2431 break;
2432
2433 case MII_TG3_AUX_STAT_1000HALF:
2434 *speed = SPEED_1000;
2435 *duplex = DUPLEX_HALF;
2436 break;
2437
2438 case MII_TG3_AUX_STAT_1000FULL:
2439 *speed = SPEED_1000;
2440 *duplex = DUPLEX_FULL;
2441 break;
2442
2443 default:
Michael Chan715116a2006-09-27 16:09:25 -07002444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2445 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2446 SPEED_10;
2447 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2448 DUPLEX_HALF;
2449 break;
2450 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 *speed = SPEED_INVALID;
2452 *duplex = DUPLEX_INVALID;
2453 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002454 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455}
2456
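/* Start or restart autonegotiation on a copper PHY.  Three cases:
 * entering low-power mode advertises only 10baseT (plus 100baseT when
 * WOL at 100Mb is enabled); SPEED_INVALID means advertise everything in
 * link_config; otherwise a single forced speed/duplex is programmed,
 * with autoneg bypassed via a direct BMCR write when it is disabled.
 */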
2457static void tg3_phy_copper_begin(struct tg3 *tp)
2458{
2459 u32 new_adv;
2460 int i;
2461
2462 if (tp->link_config.phy_is_low_power) {
2463 /* Entering low power mode. Disable gigabit and
2464 * 100baseT advertisements.
2465 */
2466 tg3_writephy(tp, MII_TG3_CTRL, 0);
2467
2468 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2469 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2470 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2471 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2472
2473 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2474 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2476 tp->link_config.advertising &=
2477 ~(ADVERTISED_1000baseT_Half |
2478 ADVERTISED_1000baseT_Full);
2479
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002480 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2482 new_adv |= ADVERTISE_10HALF;
2483 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2484 new_adv |= ADVERTISE_10FULL;
2485 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2486 new_adv |= ADVERTISE_100HALF;
2487 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2488 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002489
2490 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2491
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2493
2494 if (tp->link_config.advertising &
2495 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2496 new_adv = 0;
2497 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2498 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2499 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2500 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2501 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2502 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2503 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2504 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2505 MII_TG3_CTRL_ENABLE_AS_MASTER);
2506 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2507 } else {
2508 tg3_writephy(tp, MII_TG3_CTRL, 0);
2509 }
2510 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002511 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2512 new_adv |= ADVERTISE_CSMA;
2513
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514 /* Asking for a specific link mode. */
2515 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2517
2518 if (tp->link_config.duplex == DUPLEX_FULL)
2519 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2520 else
2521 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2522 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2523 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2524 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2525 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 if (tp->link_config.speed == SPEED_100) {
2528 if (tp->link_config.duplex == DUPLEX_FULL)
2529 new_adv |= ADVERTISE_100FULL;
2530 else
2531 new_adv |= ADVERTISE_100HALF;
2532 } else {
2533 if (tp->link_config.duplex == DUPLEX_FULL)
2534 new_adv |= ADVERTISE_10FULL;
2535 else
2536 new_adv |= ADVERTISE_10HALF;
2537 }
2538 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002539
2540 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002542
2543 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 }
2545
2546 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2547 tp->link_config.speed != SPEED_INVALID) {
2548 u32 bmcr, orig_bmcr;
2549
2550 tp->link_config.active_speed = tp->link_config.speed;
2551 tp->link_config.active_duplex = tp->link_config.duplex;
2552
2553 bmcr = 0;
2554 switch (tp->link_config.speed) {
2555 default:
2556 case SPEED_10:
2557 break;
2558
2559 case SPEED_100:
2560 bmcr |= BMCR_SPEED100;
2561 break;
2562
2563 case SPEED_1000:
2564 bmcr |= TG3_BMCR_SPEED1000;
2565 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002566 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567
2568 if (tp->link_config.duplex == DUPLEX_FULL)
2569 bmcr |= BMCR_FULLDPLX;
2570
2571 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2572 (bmcr != orig_bmcr)) {
2573 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2574 for (i = 0; i < 1500; i++) {
2575 u32 tmp;
2576
2577 udelay(10);
2578 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2579 tg3_readphy(tp, MII_BMSR, &tmp))
2580 continue;
2581 if (!(tmp & BMSR_LSTATUS)) {
2582 udelay(40);
2583 break;
2584 }
2585 }
2586 tg3_writephy(tp, MII_BMCR, bmcr);
2587 udelay(40);
2588 }
2589 } else {
2590 tg3_writephy(tp, MII_BMCR,
2591 BMCR_ANENABLE | BMCR_ANRESTART);
2592 }
2593}
2594
2595static int tg3_init_5401phy_dsp(struct tg3 *tp)
2596{
2597 int err;
2598
2599 /* Turn off tap power management. */
2600 /* Set Extended packet length bit */
2601 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2602
2603 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2604 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2605
2606 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2607 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2608
2609 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2610 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2611
2612 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2613 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2614
2615 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2616 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2617
2618 udelay(40);
2619
2620 return err;
2621}
2622
Michael Chan3600d912006-12-07 00:21:48 -08002623static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624{
Michael Chan3600d912006-12-07 00:21:48 -08002625 u32 adv_reg, all_mask = 0;
2626
2627 if (mask & ADVERTISED_10baseT_Half)
2628 all_mask |= ADVERTISE_10HALF;
2629 if (mask & ADVERTISED_10baseT_Full)
2630 all_mask |= ADVERTISE_10FULL;
2631 if (mask & ADVERTISED_100baseT_Half)
2632 all_mask |= ADVERTISE_100HALF;
2633 if (mask & ADVERTISED_100baseT_Full)
2634 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635
2636 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2637 return 0;
2638
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 if ((adv_reg & all_mask) != all_mask)
2640 return 0;
2641 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2642 u32 tg3_ctrl;
2643
Michael Chan3600d912006-12-07 00:21:48 -08002644 all_mask = 0;
2645 if (mask & ADVERTISED_1000baseT_Half)
2646 all_mask |= ADVERTISE_1000HALF;
2647 if (mask & ADVERTISED_1000baseT_Full)
2648 all_mask |= ADVERTISE_1000FULL;
2649
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2651 return 0;
2652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 if ((tg3_ctrl & all_mask) != all_mask)
2654 return 0;
2655 }
2656 return 1;
2657}
2658
Matt Carlsonef167e22007-12-20 20:10:01 -08002659static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2660{
2661 u32 curadv, reqadv;
2662
2663 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2664 return 1;
2665
2666 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2667 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2668
2669 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2670 if (curadv != reqadv)
2671 return 0;
2672
2673 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2674 tg3_readphy(tp, MII_LPA, rmtadv);
2675 } else {
2676 /* Reprogram the advertisement register, even if it
2677 * does not affect the current link. If the link
2678 * gets renegotiated in the future, we can save an
2679 * additional renegotiation cycle by advertising
2680 * it correctly in the first place.
2681 */
2682 if (curadv != reqadv) {
2683 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2684 ADVERTISE_PAUSE_ASYM);
2685 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2686 }
2687 }
2688
2689 return 1;
2690}
2691
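/* Main link bring-up path for copper PHYs: clear stale MAC and PHY
 * interrupt state, apply chip-specific workarounds, poll BMSR and
 * AUX_STAT for the negotiated speed/duplex, program flow control and
 * MAC_MODE to match, and report carrier changes to the stack.
 */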
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2693{
2694 int current_link_up;
2695 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002696 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 u16 current_speed;
2698 u8 current_duplex;
2699 int i, err;
2700
2701 tw32(MAC_EVENT, 0);
2702
2703 tw32_f(MAC_STATUS,
2704 (MAC_STATUS_SYNC_CHANGED |
2705 MAC_STATUS_CFG_CHANGED |
2706 MAC_STATUS_MI_COMPLETION |
2707 MAC_STATUS_LNKSTATE_CHANGED));
2708 udelay(40);
2709
Matt Carlson8ef21422008-05-02 16:47:53 -07002710 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2711 tw32_f(MAC_MI_MODE,
2712 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2713 udelay(80);
2714 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715
2716 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2717
2718 /* Some third-party PHYs need to be reset on link going
2719 * down.
2720 */
2721 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2722 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2723 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2724 netif_carrier_ok(tp->dev)) {
2725 tg3_readphy(tp, MII_BMSR, &bmsr);
2726 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2727 !(bmsr & BMSR_LSTATUS))
2728 force_reset = 1;
2729 }
2730 if (force_reset)
2731 tg3_phy_reset(tp);
2732
2733 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2734 tg3_readphy(tp, MII_BMSR, &bmsr);
2735 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2736 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2737 bmsr = 0;
2738
2739 if (!(bmsr & BMSR_LSTATUS)) {
2740 err = tg3_init_5401phy_dsp(tp);
2741 if (err)
2742 return err;
2743
2744 tg3_readphy(tp, MII_BMSR, &bmsr);
2745 for (i = 0; i < 1000; i++) {
2746 udelay(10);
2747 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2748 (bmsr & BMSR_LSTATUS)) {
2749 udelay(40);
2750 break;
2751 }
2752 }
2753
2754 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2755 !(bmsr & BMSR_LSTATUS) &&
2756 tp->link_config.active_speed == SPEED_1000) {
2757 err = tg3_phy_reset(tp);
2758 if (!err)
2759 err = tg3_init_5401phy_dsp(tp);
2760 if (err)
2761 return err;
2762 }
2763 }
2764 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2765 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2766 /* 5701 {A0,B0} CRC bug workaround */
2767 tg3_writephy(tp, 0x15, 0x0a75);
2768 tg3_writephy(tp, 0x1c, 0x8c68);
2769 tg3_writephy(tp, 0x1c, 0x8d68);
2770 tg3_writephy(tp, 0x1c, 0x8c68);
2771 }
2772
2773 /* Clear pending interrupts... */
2774 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2775 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2776
2777 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2778 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002779 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2781
2782 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2784 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2785 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2786 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2787 else
2788 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2789 }
2790
2791 current_link_up = 0;
2792 current_speed = SPEED_INVALID;
2793 current_duplex = DUPLEX_INVALID;
2794
2795 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2796 u32 val;
2797
2798 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2799 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2800 if (!(val & (1 << 10))) {
2801 val |= (1 << 10);
2802 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2803 goto relink;
2804 }
2805 }
2806
2807 bmsr = 0;
2808 for (i = 0; i < 100; i++) {
2809 tg3_readphy(tp, MII_BMSR, &bmsr);
2810 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2811 (bmsr & BMSR_LSTATUS))
2812 break;
2813 udelay(40);
2814 }
2815
2816 if (bmsr & BMSR_LSTATUS) {
2817 u32 aux_stat, bmcr;
2818
2819 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2820 for (i = 0; i < 2000; i++) {
2821 udelay(10);
2822 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2823 aux_stat)
2824 break;
2825 }
2826
2827 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2828 &current_speed,
2829 &current_duplex);
2830
2831 bmcr = 0;
2832 for (i = 0; i < 200; i++) {
2833 tg3_readphy(tp, MII_BMCR, &bmcr);
2834 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2835 continue;
2836 if (bmcr && bmcr != 0x7fff)
2837 break;
2838 udelay(10);
2839 }
2840
Matt Carlsonef167e22007-12-20 20:10:01 -08002841 lcl_adv = 0;
2842 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843
Matt Carlsonef167e22007-12-20 20:10:01 -08002844 tp->link_config.active_speed = current_speed;
2845 tp->link_config.active_duplex = current_duplex;
2846
2847 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2848 if ((bmcr & BMCR_ANENABLE) &&
2849 tg3_copper_is_advertising_all(tp,
2850 tp->link_config.advertising)) {
2851 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2852 &rmt_adv))
2853 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 }
2855 } else {
2856 if (!(bmcr & BMCR_ANENABLE) &&
2857 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002858 tp->link_config.duplex == current_duplex &&
2859 tp->link_config.flowctrl ==
2860 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 }
2863 }
2864
Matt Carlsonef167e22007-12-20 20:10:01 -08002865 if (current_link_up == 1 &&
2866 tp->link_config.active_duplex == DUPLEX_FULL)
2867 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 }
2869
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870relink:
Michael Chan6921d202005-12-13 21:15:53 -08002871 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 u32 tmp;
2873
2874 tg3_phy_copper_begin(tp);
2875
2876 tg3_readphy(tp, MII_BMSR, &tmp);
2877 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2878 (tmp & BMSR_LSTATUS))
2879 current_link_up = 1;
2880 }
2881
2882 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2883 if (current_link_up == 1) {
2884 if (tp->link_config.active_speed == SPEED_100 ||
2885 tp->link_config.active_speed == SPEED_10)
2886 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2887 else
2888 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2889 } else
2890 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2891
2892 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2893 if (tp->link_config.active_duplex == DUPLEX_HALF)
2894 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002897 if (current_link_up == 1 &&
2898 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002900 else
2901 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 }
2903
2904 /* ??? Without this setting Netgear GA302T PHY does not
2905 * ??? send/receive packets...
2906 */
2907 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2908 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2909 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2910 tw32_f(MAC_MI_MODE, tp->mi_mode);
2911 udelay(80);
2912 }
2913
2914 tw32_f(MAC_MODE, tp->mac_mode);
2915 udelay(40);
2916
2917 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2918 /* Polled via timer. */
2919 tw32_f(MAC_EVENT, 0);
2920 } else {
2921 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2922 }
2923 udelay(40);
2924
2925 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2926 current_link_up == 1 &&
2927 tp->link_config.active_speed == SPEED_1000 &&
2928 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2929 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2930 udelay(120);
2931 tw32_f(MAC_STATUS,
2932 (MAC_STATUS_SYNC_CHANGED |
2933 MAC_STATUS_CFG_CHANGED));
2934 udelay(40);
2935 tg3_write_mem(tp,
2936 NIC_SRAM_FIRMWARE_MBOX,
2937 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2938 }
2939
Matt Carlson5e7dfd02008-11-21 17:18:16 -08002940 /* Prevent send BD corruption. */
2941 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2942 u16 oldlnkctl, newlnkctl;
2943
2944 pci_read_config_word(tp->pdev,
2945 tp->pcie_cap + PCI_EXP_LNKCTL,
2946 &oldlnkctl);
2947 if (tp->link_config.active_speed == SPEED_100 ||
2948 tp->link_config.active_speed == SPEED_10)
2949 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
2950 else
2951 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
2952 if (newlnkctl != oldlnkctl)
2953 pci_write_config_word(tp->pdev,
2954 tp->pcie_cap + PCI_EXP_LNKCTL,
2955 newlnkctl);
2956 }
2957
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958 if (current_link_up != netif_carrier_ok(tp->dev)) {
2959 if (current_link_up)
2960 netif_carrier_on(tp->dev);
2961 else
2962 netif_carrier_off(tp->dev);
2963 tg3_link_report(tp);
2964 }
2965
2966 return 0;
2967}
2968
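/* Software autonegotiation state machine for fiber/serdes devices
 * (roughly the 802.3 Clause 37 arbitration states).  The handler below
 * samples the received config word from MAC_RX_AUTO_NEG, walks the
 * ANEG_STATE_* states, and reports results through the MR_* flags.
 */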
2969struct tg3_fiber_aneginfo {
2970 int state;
2971#define ANEG_STATE_UNKNOWN 0
2972#define ANEG_STATE_AN_ENABLE 1
2973#define ANEG_STATE_RESTART_INIT 2
2974#define ANEG_STATE_RESTART 3
2975#define ANEG_STATE_DISABLE_LINK_OK 4
2976#define ANEG_STATE_ABILITY_DETECT_INIT 5
2977#define ANEG_STATE_ABILITY_DETECT 6
2978#define ANEG_STATE_ACK_DETECT_INIT 7
2979#define ANEG_STATE_ACK_DETECT 8
2980#define ANEG_STATE_COMPLETE_ACK_INIT 9
2981#define ANEG_STATE_COMPLETE_ACK 10
2982#define ANEG_STATE_IDLE_DETECT_INIT 11
2983#define ANEG_STATE_IDLE_DETECT 12
2984#define ANEG_STATE_LINK_OK 13
2985#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2986#define ANEG_STATE_NEXT_PAGE_WAIT 15
2987
2988 u32 flags;
2989#define MR_AN_ENABLE 0x00000001
2990#define MR_RESTART_AN 0x00000002
2991#define MR_AN_COMPLETE 0x00000004
2992#define MR_PAGE_RX 0x00000008
2993#define MR_NP_LOADED 0x00000010
2994#define MR_TOGGLE_TX 0x00000020
2995#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2996#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2997#define MR_LP_ADV_SYM_PAUSE 0x00000100
2998#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2999#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3000#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3001#define MR_LP_ADV_NEXT_PAGE 0x00001000
3002#define MR_TOGGLE_RX 0x00002000
3003#define MR_NP_RX 0x00004000
3004
3005#define MR_LINK_OK 0x80000000
3006
3007 unsigned long link_time, cur_time;
3008
3009 u32 ability_match_cfg;
3010 int ability_match_count;
3011
3012 char ability_match, idle_match, ack_match;
3013
3014 u32 txconfig, rxconfig;
3015#define ANEG_CFG_NP 0x00000080
3016#define ANEG_CFG_ACK 0x00000040
3017#define ANEG_CFG_RF2 0x00000020
3018#define ANEG_CFG_RF1 0x00000010
3019#define ANEG_CFG_PS2 0x00000001
3020#define ANEG_CFG_PS1 0x00008000
3021#define ANEG_CFG_HD 0x00004000
3022#define ANEG_CFG_FD 0x00002000
3023#define ANEG_CFG_INVAL 0x00001f06
3024
3025};
3026#define ANEG_OK 0
3027#define ANEG_DONE 1
3028#define ANEG_TIMER_ENAB 2
3029#define ANEG_FAILED -1
3030
3031#define ANEG_STATE_SETTLE_TIME 10000
3032
3033static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3034 struct tg3_fiber_aneginfo *ap)
3035{
Matt Carlson5be73b42007-12-20 20:09:29 -08003036 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 unsigned long delta;
3038 u32 rx_cfg_reg;
3039 int ret;
3040
3041 if (ap->state == ANEG_STATE_UNKNOWN) {
3042 ap->rxconfig = 0;
3043 ap->link_time = 0;
3044 ap->cur_time = 0;
3045 ap->ability_match_cfg = 0;
3046 ap->ability_match_count = 0;
3047 ap->ability_match = 0;
3048 ap->idle_match = 0;
3049 ap->ack_match = 0;
3050 }
3051 ap->cur_time++;
3052
3053 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3054 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3055
3056 if (rx_cfg_reg != ap->ability_match_cfg) {
3057 ap->ability_match_cfg = rx_cfg_reg;
3058 ap->ability_match = 0;
3059 ap->ability_match_count = 0;
3060 } else {
3061 if (++ap->ability_match_count > 1) {
3062 ap->ability_match = 1;
3063 ap->ability_match_cfg = rx_cfg_reg;
3064 }
3065 }
3066 if (rx_cfg_reg & ANEG_CFG_ACK)
3067 ap->ack_match = 1;
3068 else
3069 ap->ack_match = 0;
3070
3071 ap->idle_match = 0;
3072 } else {
3073 ap->idle_match = 1;
3074 ap->ability_match_cfg = 0;
3075 ap->ability_match_count = 0;
3076 ap->ability_match = 0;
3077 ap->ack_match = 0;
3078
3079 rx_cfg_reg = 0;
3080 }
3081
3082 ap->rxconfig = rx_cfg_reg;
3083 ret = ANEG_OK;
3084
3085 switch(ap->state) {
3086 case ANEG_STATE_UNKNOWN:
3087 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3088 ap->state = ANEG_STATE_AN_ENABLE;
3089
3090 /* fallthru */
3091 case ANEG_STATE_AN_ENABLE:
3092 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3093 if (ap->flags & MR_AN_ENABLE) {
3094 ap->link_time = 0;
3095 ap->cur_time = 0;
3096 ap->ability_match_cfg = 0;
3097 ap->ability_match_count = 0;
3098 ap->ability_match = 0;
3099 ap->idle_match = 0;
3100 ap->ack_match = 0;
3101
3102 ap->state = ANEG_STATE_RESTART_INIT;
3103 } else {
3104 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3105 }
3106 break;
3107
3108 case ANEG_STATE_RESTART_INIT:
3109 ap->link_time = ap->cur_time;
3110 ap->flags &= ~(MR_NP_LOADED);
3111 ap->txconfig = 0;
3112 tw32(MAC_TX_AUTO_NEG, 0);
3113 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3114 tw32_f(MAC_MODE, tp->mac_mode);
3115 udelay(40);
3116
3117 ret = ANEG_TIMER_ENAB;
3118 ap->state = ANEG_STATE_RESTART;
3119
3120 /* fallthru */
3121 case ANEG_STATE_RESTART:
3122 delta = ap->cur_time - ap->link_time;
3123 if (delta > ANEG_STATE_SETTLE_TIME) {
3124 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3125 } else {
3126 ret = ANEG_TIMER_ENAB;
3127 }
3128 break;
3129
3130 case ANEG_STATE_DISABLE_LINK_OK:
3131 ret = ANEG_DONE;
3132 break;
3133
3134 case ANEG_STATE_ABILITY_DETECT_INIT:
3135 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003136 ap->txconfig = ANEG_CFG_FD;
3137 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3138 if (flowctrl & ADVERTISE_1000XPAUSE)
3139 ap->txconfig |= ANEG_CFG_PS1;
3140 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3141 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3143 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3144 tw32_f(MAC_MODE, tp->mac_mode);
3145 udelay(40);
3146
3147 ap->state = ANEG_STATE_ABILITY_DETECT;
3148 break;
3149
3150 case ANEG_STATE_ABILITY_DETECT:
3151 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3152 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3153 }
3154 break;
3155
3156 case ANEG_STATE_ACK_DETECT_INIT:
3157 ap->txconfig |= ANEG_CFG_ACK;
3158 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3159 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3160 tw32_f(MAC_MODE, tp->mac_mode);
3161 udelay(40);
3162
3163 ap->state = ANEG_STATE_ACK_DETECT;
3164
3165 /* fallthru */
3166 case ANEG_STATE_ACK_DETECT:
3167 if (ap->ack_match != 0) {
3168 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3169 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3170 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3171 } else {
3172 ap->state = ANEG_STATE_AN_ENABLE;
3173 }
3174 } else if (ap->ability_match != 0 &&
3175 ap->rxconfig == 0) {
3176 ap->state = ANEG_STATE_AN_ENABLE;
3177 }
3178 break;
3179
3180 case ANEG_STATE_COMPLETE_ACK_INIT:
3181 if (ap->rxconfig & ANEG_CFG_INVAL) {
3182 ret = ANEG_FAILED;
3183 break;
3184 }
3185 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3186 MR_LP_ADV_HALF_DUPLEX |
3187 MR_LP_ADV_SYM_PAUSE |
3188 MR_LP_ADV_ASYM_PAUSE |
3189 MR_LP_ADV_REMOTE_FAULT1 |
3190 MR_LP_ADV_REMOTE_FAULT2 |
3191 MR_LP_ADV_NEXT_PAGE |
3192 MR_TOGGLE_RX |
3193 MR_NP_RX);
3194 if (ap->rxconfig & ANEG_CFG_FD)
3195 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3196 if (ap->rxconfig & ANEG_CFG_HD)
3197 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3198 if (ap->rxconfig & ANEG_CFG_PS1)
3199 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3200 if (ap->rxconfig & ANEG_CFG_PS2)
3201 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3202 if (ap->rxconfig & ANEG_CFG_RF1)
3203 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3204 if (ap->rxconfig & ANEG_CFG_RF2)
3205 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3206 if (ap->rxconfig & ANEG_CFG_NP)
3207 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3208
3209 ap->link_time = ap->cur_time;
3210
3211 ap->flags ^= (MR_TOGGLE_TX);
3212 if (ap->rxconfig & 0x0008)
3213 ap->flags |= MR_TOGGLE_RX;
3214 if (ap->rxconfig & ANEG_CFG_NP)
3215 ap->flags |= MR_NP_RX;
3216 ap->flags |= MR_PAGE_RX;
3217
3218 ap->state = ANEG_STATE_COMPLETE_ACK;
3219 ret = ANEG_TIMER_ENAB;
3220 break;
3221
3222 case ANEG_STATE_COMPLETE_ACK:
3223 if (ap->ability_match != 0 &&
3224 ap->rxconfig == 0) {
3225 ap->state = ANEG_STATE_AN_ENABLE;
3226 break;
3227 }
3228 delta = ap->cur_time - ap->link_time;
3229 if (delta > ANEG_STATE_SETTLE_TIME) {
3230 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3231 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3232 } else {
3233 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3234 !(ap->flags & MR_NP_RX)) {
3235 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3236 } else {
3237 ret = ANEG_FAILED;
3238 }
3239 }
3240 }
3241 break;
3242
3243 case ANEG_STATE_IDLE_DETECT_INIT:
3244 ap->link_time = ap->cur_time;
3245 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3246 tw32_f(MAC_MODE, tp->mac_mode);
3247 udelay(40);
3248
3249 ap->state = ANEG_STATE_IDLE_DETECT;
3250 ret = ANEG_TIMER_ENAB;
3251 break;
3252
3253 case ANEG_STATE_IDLE_DETECT:
3254 if (ap->ability_match != 0 &&
3255 ap->rxconfig == 0) {
3256 ap->state = ANEG_STATE_AN_ENABLE;
3257 break;
3258 }
3259 delta = ap->cur_time - ap->link_time;
3260 if (delta > ANEG_STATE_SETTLE_TIME) {
3261 /* XXX another gem from the Broadcom driver :( */
3262 ap->state = ANEG_STATE_LINK_OK;
3263 }
3264 break;
3265
3266 case ANEG_STATE_LINK_OK:
3267 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3268 ret = ANEG_DONE;
3269 break;
3270
3271 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3272 /* ??? unimplemented */
3273 break;
3274
3275 case ANEG_STATE_NEXT_PAGE_WAIT:
3276 /* ??? unimplemented */
3277 break;
3278
3279 default:
3280 ret = ANEG_FAILED;
3281 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003282 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
3284 return ret;
3285}
3286
Matt Carlson5be73b42007-12-20 20:09:29 -08003287static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288{
3289 int res = 0;
3290 struct tg3_fiber_aneginfo aninfo;
3291 int status = ANEG_FAILED;
3292 unsigned int tick;
3293 u32 tmp;
3294
3295 tw32_f(MAC_TX_AUTO_NEG, 0);
3296
3297 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3298 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3299 udelay(40);
3300
3301 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3302 udelay(40);
3303
3304 memset(&aninfo, 0, sizeof(aninfo));
3305 aninfo.flags |= MR_AN_ENABLE;
3306 aninfo.state = ANEG_STATE_UNKNOWN;
3307 aninfo.cur_time = 0;
3308 tick = 0;
3309 while (++tick < 195000) {
3310 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3311 if (status == ANEG_DONE || status == ANEG_FAILED)
3312 break;
3313
3314 udelay(1);
3315 }
3316
3317 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3318 tw32_f(MAC_MODE, tp->mac_mode);
3319 udelay(40);
3320
Matt Carlson5be73b42007-12-20 20:09:29 -08003321 *txflags = aninfo.txconfig;
3322 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323
3324 if (status == ANEG_DONE &&
3325 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3326 MR_LP_ADV_FULL_DUPLEX)))
3327 res = 1;
3328
3329 return res;
3330}
3331
3332static void tg3_init_bcm8002(struct tg3 *tp)
3333{
3334 u32 mac_status = tr32(MAC_STATUS);
3335 int i;
3336
3337	/* Reset when initializing for the first time or when we have a link. */
3338 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3339 !(mac_status & MAC_STATUS_PCS_SYNCED))
3340 return;
3341
3342 /* Set PLL lock range. */
3343 tg3_writephy(tp, 0x16, 0x8007);
3344
3345 /* SW reset */
3346 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3347
3348 /* Wait for reset to complete. */
3349 /* XXX schedule_timeout() ... */
3350 for (i = 0; i < 500; i++)
3351 udelay(10);
3352
3353 /* Config mode; select PMA/Ch 1 regs. */
3354 tg3_writephy(tp, 0x10, 0x8411);
3355
3356 /* Enable auto-lock and comdet, select txclk for tx. */
3357 tg3_writephy(tp, 0x11, 0x0a10);
3358
3359 tg3_writephy(tp, 0x18, 0x00a0);
3360 tg3_writephy(tp, 0x16, 0x41ff);
3361
3362 /* Assert and deassert POR. */
3363 tg3_writephy(tp, 0x13, 0x0400);
3364 udelay(40);
3365 tg3_writephy(tp, 0x13, 0x0000);
3366
3367 tg3_writephy(tp, 0x11, 0x0a50);
3368 udelay(40);
3369 tg3_writephy(tp, 0x11, 0x0a10);
3370
3371 /* Wait for signal to stabilize */
3372 /* XXX schedule_timeout() ... */
3373 for (i = 0; i < 15000; i++)
3374 udelay(10);
3375
3376 /* Deselect the channel register so we can read the PHYID
3377 * later.
3378 */
3379 tg3_writephy(tp, 0x10, 0x8011);
3380}
3381
3382static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3383{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003384 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 u32 sg_dig_ctrl, sg_dig_status;
3386 u32 serdes_cfg, expected_sg_dig_ctrl;
3387 int workaround, port_a;
3388 int current_link_up;
3389
3390 serdes_cfg = 0;
3391 expected_sg_dig_ctrl = 0;
3392 workaround = 0;
3393 port_a = 1;
3394 current_link_up = 0;
3395
3396 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3397 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3398 workaround = 1;
3399 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3400 port_a = 0;
3401
3402 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3403 /* preserve bits 20-23 for voltage regulator */
3404 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3405 }
3406
3407 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3408
3409 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003410 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 if (workaround) {
3412 u32 val = serdes_cfg;
3413
3414 if (port_a)
3415 val |= 0xc010000;
3416 else
3417 val |= 0x4010000;
3418 tw32_f(MAC_SERDES_CFG, val);
3419 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003420
3421 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 }
3423 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3424 tg3_setup_flow_control(tp, 0, 0);
3425 current_link_up = 1;
3426 }
3427 goto out;
3428 }
3429
3430 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003431 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432
Matt Carlson82cd3d12007-12-20 20:09:00 -08003433 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3434 if (flowctrl & ADVERTISE_1000XPAUSE)
3435 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3436 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3437 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438
3439 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003440 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3441 tp->serdes_counter &&
3442 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3443 MAC_STATUS_RCVD_CFG)) ==
3444 MAC_STATUS_PCS_SYNCED)) {
3445 tp->serdes_counter--;
3446 current_link_up = 1;
3447 goto out;
3448 }
3449restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 if (workaround)
3451 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003452 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 udelay(5);
3454 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3455
Michael Chan3d3ebe72006-09-27 15:59:15 -07003456 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3457 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3459 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003460 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 mac_status = tr32(MAC_STATUS);
3462
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003463 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003465 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466
Matt Carlson82cd3d12007-12-20 20:09:00 -08003467 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3468 local_adv |= ADVERTISE_1000XPAUSE;
3469 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3470 local_adv |= ADVERTISE_1000XPSE_ASYM;
3471
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003472 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003473 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003474 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003475 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476
3477 tg3_setup_flow_control(tp, local_adv, remote_adv);
3478 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003479 tp->serdes_counter = 0;
3480 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003481 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003482 if (tp->serdes_counter)
3483 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 else {
3485 if (workaround) {
3486 u32 val = serdes_cfg;
3487
3488 if (port_a)
3489 val |= 0xc010000;
3490 else
3491 val |= 0x4010000;
3492
3493 tw32_f(MAC_SERDES_CFG, val);
3494 }
3495
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003496 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 udelay(40);
3498
3499				/* Link parallel detection - link is up only
3500				 * if we have PCS_SYNC and we are not
3501				 * receiving config code words. */
3502 mac_status = tr32(MAC_STATUS);
3503 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3504 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3505 tg3_setup_flow_control(tp, 0, 0);
3506 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003507 tp->tg3_flags2 |=
3508 TG3_FLG2_PARALLEL_DETECT;
3509 tp->serdes_counter =
3510 SERDES_PARALLEL_DET_TIMEOUT;
3511 } else
3512 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513 }
3514 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003515 } else {
3516 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3517 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518 }
3519
3520out:
3521 return current_link_up;
3522}
3523
3524static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3525{
3526 int current_link_up = 0;
3527
Michael Chan5cf64b82007-05-05 12:11:21 -07003528 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530
3531 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003532 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003534
Matt Carlson5be73b42007-12-20 20:09:29 -08003535 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3536 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537
Matt Carlson5be73b42007-12-20 20:09:29 -08003538 if (txflags & ANEG_CFG_PS1)
3539 local_adv |= ADVERTISE_1000XPAUSE;
3540 if (txflags & ANEG_CFG_PS2)
3541 local_adv |= ADVERTISE_1000XPSE_ASYM;
3542
3543 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3544 remote_adv |= LPA_1000XPAUSE;
3545 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3546 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547
3548 tg3_setup_flow_control(tp, local_adv, remote_adv);
3549
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550 current_link_up = 1;
3551 }
3552 for (i = 0; i < 30; i++) {
3553 udelay(20);
3554 tw32_f(MAC_STATUS,
3555 (MAC_STATUS_SYNC_CHANGED |
3556 MAC_STATUS_CFG_CHANGED));
3557 udelay(40);
3558 if ((tr32(MAC_STATUS) &
3559 (MAC_STATUS_SYNC_CHANGED |
3560 MAC_STATUS_CFG_CHANGED)) == 0)
3561 break;
3562 }
3563
3564 mac_status = tr32(MAC_STATUS);
3565 if (current_link_up == 0 &&
3566 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3567 !(mac_status & MAC_STATUS_RCVD_CFG))
3568 current_link_up = 1;
3569 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003570 tg3_setup_flow_control(tp, 0, 0);
3571
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572 /* Forcing 1000FD link up. */
3573 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574
3575 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3576 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003577
3578 tw32_f(MAC_MODE, tp->mac_mode);
3579 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580 }
3581
3582out:
3583 return current_link_up;
3584}
3585
3586static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3587{
3588 u32 orig_pause_cfg;
3589 u16 orig_active_speed;
3590 u8 orig_active_duplex;
3591 u32 mac_status;
3592 int current_link_up;
3593 int i;
3594
Matt Carlson8d018622007-12-20 20:05:44 -08003595 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 orig_active_speed = tp->link_config.active_speed;
3597 orig_active_duplex = tp->link_config.active_duplex;
3598
3599 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3600 netif_carrier_ok(tp->dev) &&
3601 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3602 mac_status = tr32(MAC_STATUS);
3603 mac_status &= (MAC_STATUS_PCS_SYNCED |
3604 MAC_STATUS_SIGNAL_DET |
3605 MAC_STATUS_CFG_CHANGED |
3606 MAC_STATUS_RCVD_CFG);
3607 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3608 MAC_STATUS_SIGNAL_DET)) {
3609 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3610 MAC_STATUS_CFG_CHANGED));
3611 return 0;
3612 }
3613 }
3614
3615 tw32_f(MAC_TX_AUTO_NEG, 0);
3616
3617 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3618 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3619 tw32_f(MAC_MODE, tp->mac_mode);
3620 udelay(40);
3621
3622 if (tp->phy_id == PHY_ID_BCM8002)
3623 tg3_init_bcm8002(tp);
3624
3625 /* Enable link change event even when serdes polling. */
3626 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3627 udelay(40);
3628
3629 current_link_up = 0;
3630 mac_status = tr32(MAC_STATUS);
3631
3632 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3633 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3634 else
3635 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3636
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 tp->hw_status->status =
3638 (SD_STATUS_UPDATED |
3639 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3640
3641 for (i = 0; i < 100; i++) {
3642 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3643 MAC_STATUS_CFG_CHANGED));
3644 udelay(5);
3645 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003646 MAC_STATUS_CFG_CHANGED |
3647 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 break;
3649 }
3650
3651 mac_status = tr32(MAC_STATUS);
3652 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3653 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003654 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3655 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 tw32_f(MAC_MODE, (tp->mac_mode |
3657 MAC_MODE_SEND_CONFIGS));
3658 udelay(1);
3659 tw32_f(MAC_MODE, tp->mac_mode);
3660 }
3661 }
3662
3663 if (current_link_up == 1) {
3664 tp->link_config.active_speed = SPEED_1000;
3665 tp->link_config.active_duplex = DUPLEX_FULL;
3666 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3667 LED_CTRL_LNKLED_OVERRIDE |
3668 LED_CTRL_1000MBPS_ON));
3669 } else {
3670 tp->link_config.active_speed = SPEED_INVALID;
3671 tp->link_config.active_duplex = DUPLEX_INVALID;
3672 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3673 LED_CTRL_LNKLED_OVERRIDE |
3674 LED_CTRL_TRAFFIC_OVERRIDE));
3675 }
3676
3677 if (current_link_up != netif_carrier_ok(tp->dev)) {
3678 if (current_link_up)
3679 netif_carrier_on(tp->dev);
3680 else
3681 netif_carrier_off(tp->dev);
3682 tg3_link_report(tp);
3683 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003684 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 if (orig_pause_cfg != now_pause_cfg ||
3686 orig_active_speed != tp->link_config.active_speed ||
3687 orig_active_duplex != tp->link_config.active_duplex)
3688 tg3_link_report(tp);
3689 }
3690
3691 return 0;
3692}
3693
Michael Chan747e8f82005-07-25 12:33:22 -07003694static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3695{
3696 int current_link_up, err = 0;
3697 u32 bmsr, bmcr;
3698 u16 current_speed;
3699 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003700 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003701
3702 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3703 tw32_f(MAC_MODE, tp->mac_mode);
3704 udelay(40);
3705
3706 tw32(MAC_EVENT, 0);
3707
3708 tw32_f(MAC_STATUS,
3709 (MAC_STATUS_SYNC_CHANGED |
3710 MAC_STATUS_CFG_CHANGED |
3711 MAC_STATUS_MI_COMPLETION |
3712 MAC_STATUS_LNKSTATE_CHANGED));
3713 udelay(40);
3714
3715 if (force_reset)
3716 tg3_phy_reset(tp);
3717
3718 current_link_up = 0;
3719 current_speed = SPEED_INVALID;
3720 current_duplex = DUPLEX_INVALID;
3721
3722 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3723 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3725 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3726 bmsr |= BMSR_LSTATUS;
3727 else
3728 bmsr &= ~BMSR_LSTATUS;
3729 }
Michael Chan747e8f82005-07-25 12:33:22 -07003730
3731 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3732
3733 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003734 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003735 /* do nothing, just check for link up at the end */
3736 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3737 u32 adv, new_adv;
3738
3739 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3740 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3741 ADVERTISE_1000XPAUSE |
3742 ADVERTISE_1000XPSE_ASYM |
3743 ADVERTISE_SLCT);
3744
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003745 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003746
3747 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3748 new_adv |= ADVERTISE_1000XHALF;
3749 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3750 new_adv |= ADVERTISE_1000XFULL;
3751
3752 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3753 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3754 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3755 tg3_writephy(tp, MII_BMCR, bmcr);
3756
3757 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003758 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003759 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3760
3761 return err;
3762 }
3763 } else {
3764 u32 new_bmcr;
3765
3766 bmcr &= ~BMCR_SPEED1000;
3767 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3768
3769 if (tp->link_config.duplex == DUPLEX_FULL)
3770 new_bmcr |= BMCR_FULLDPLX;
3771
3772 if (new_bmcr != bmcr) {
3773 /* BMCR_SPEED1000 is a reserved bit that needs
3774 * to be set on write.
3775 */
3776 new_bmcr |= BMCR_SPEED1000;
3777
3778 /* Force a linkdown */
3779 if (netif_carrier_ok(tp->dev)) {
3780 u32 adv;
3781
3782 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3783 adv &= ~(ADVERTISE_1000XFULL |
3784 ADVERTISE_1000XHALF |
3785 ADVERTISE_SLCT);
3786 tg3_writephy(tp, MII_ADVERTISE, adv);
3787 tg3_writephy(tp, MII_BMCR, bmcr |
3788 BMCR_ANRESTART |
3789 BMCR_ANENABLE);
3790 udelay(10);
3791 netif_carrier_off(tp->dev);
3792 }
3793 tg3_writephy(tp, MII_BMCR, new_bmcr);
3794 bmcr = new_bmcr;
3795 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3796 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003797 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3798 ASIC_REV_5714) {
3799 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3800 bmsr |= BMSR_LSTATUS;
3801 else
3802 bmsr &= ~BMSR_LSTATUS;
3803 }
Michael Chan747e8f82005-07-25 12:33:22 -07003804 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3805 }
3806 }
3807
3808 if (bmsr & BMSR_LSTATUS) {
3809 current_speed = SPEED_1000;
3810 current_link_up = 1;
3811 if (bmcr & BMCR_FULLDPLX)
3812 current_duplex = DUPLEX_FULL;
3813 else
3814 current_duplex = DUPLEX_HALF;
3815
Matt Carlsonef167e22007-12-20 20:10:01 -08003816 local_adv = 0;
3817 remote_adv = 0;
3818
Michael Chan747e8f82005-07-25 12:33:22 -07003819 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003820 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003821
3822 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3823 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3824 common = local_adv & remote_adv;
3825 if (common & (ADVERTISE_1000XHALF |
3826 ADVERTISE_1000XFULL)) {
3827 if (common & ADVERTISE_1000XFULL)
3828 current_duplex = DUPLEX_FULL;
3829 else
3830 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003831 }
3832 else
3833 current_link_up = 0;
3834 }
3835 }
3836
Matt Carlsonef167e22007-12-20 20:10:01 -08003837 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3838 tg3_setup_flow_control(tp, local_adv, remote_adv);
3839
Michael Chan747e8f82005-07-25 12:33:22 -07003840 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3841 if (tp->link_config.active_duplex == DUPLEX_HALF)
3842 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3843
3844 tw32_f(MAC_MODE, tp->mac_mode);
3845 udelay(40);
3846
3847 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3848
3849 tp->link_config.active_speed = current_speed;
3850 tp->link_config.active_duplex = current_duplex;
3851
3852 if (current_link_up != netif_carrier_ok(tp->dev)) {
3853 if (current_link_up)
3854 netif_carrier_on(tp->dev);
3855 else {
3856 netif_carrier_off(tp->dev);
3857 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3858 }
3859 tg3_link_report(tp);
3860 }
3861 return err;
3862}
3863
3864static void tg3_serdes_parallel_detect(struct tg3 *tp)
3865{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003866 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003867 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003868 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003869 return;
3870 }
3871 if (!netif_carrier_ok(tp->dev) &&
3872 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3873 u32 bmcr;
3874
3875 tg3_readphy(tp, MII_BMCR, &bmcr);
3876 if (bmcr & BMCR_ANENABLE) {
3877 u32 phy1, phy2;
3878
3879 /* Select shadow register 0x1f */
3880 tg3_writephy(tp, 0x1c, 0x7c00);
3881 tg3_readphy(tp, 0x1c, &phy1);
3882
3883 /* Select expansion interrupt status register */
3884 tg3_writephy(tp, 0x17, 0x0f01);
3885 tg3_readphy(tp, 0x15, &phy2);
3886 tg3_readphy(tp, 0x15, &phy2);
3887
3888 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3889				/* We have signal detect and are not receiving
3890				 * config code words, so the link is up by parallel
3891 * detection.
3892 */
3893
3894 bmcr &= ~BMCR_ANENABLE;
3895 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3896 tg3_writephy(tp, MII_BMCR, bmcr);
3897 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3898 }
3899 }
3900 }
3901 else if (netif_carrier_ok(tp->dev) &&
3902 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3903 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3904 u32 phy2;
3905
3906 /* Select expansion interrupt status register */
3907 tg3_writephy(tp, 0x17, 0x0f01);
3908 tg3_readphy(tp, 0x15, &phy2);
3909 if (phy2 & 0x20) {
3910 u32 bmcr;
3911
3912 /* Config code words received, turn on autoneg. */
3913 tg3_readphy(tp, MII_BMCR, &bmcr);
3914 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3915
3916 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3917
3918 }
3919 }
3920}
3921
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3923{
3924 int err;
3925
3926 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3927 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003928 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3929 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930 } else {
3931 err = tg3_setup_copper_phy(tp, force_reset);
3932 }
3933
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003934 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003935 u32 val, scale;
3936
3937 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3938 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3939 scale = 65;
3940 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3941 scale = 6;
3942 else
3943 scale = 12;
3944
3945 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3946 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3947 tw32(GRC_MISC_CFG, val);
3948 }
3949
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950 if (tp->link_config.active_speed == SPEED_1000 &&
3951 tp->link_config.active_duplex == DUPLEX_HALF)
3952 tw32(MAC_TX_LENGTHS,
3953 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3954 (6 << TX_LENGTHS_IPG_SHIFT) |
3955 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3956 else
3957 tw32(MAC_TX_LENGTHS,
3958 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3959 (6 << TX_LENGTHS_IPG_SHIFT) |
3960 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3961
3962 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3963 if (netif_carrier_ok(tp->dev)) {
3964 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003965 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 } else {
3967 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3968 }
3969 }
3970
Matt Carlson8ed5d972007-05-07 00:25:49 -07003971 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3972 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3973 if (!netif_carrier_ok(tp->dev))
3974 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3975 tp->pwrmgmt_thresh;
3976 else
3977 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3978 tw32(PCIE_PWR_MGMT_THRESH, val);
3979 }
3980
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 return err;
3982}
3983
Michael Chandf3e6542006-05-26 17:48:07 -07003984/* This is called whenever we suspect that the system chipset is re-
3985 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3986 * is bogus tx completions. We try to recover by setting the
3987 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3988 * in the workqueue.
3989 */
3990static void tg3_tx_recover(struct tg3 *tp)
3991{
3992 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3993 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3994
3995 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3996 "mapped I/O cycles to the network device, attempting to "
3997 "recover. Please report the problem to the driver maintainer "
3998 "and include system chipset information.\n", tp->dev->name);
3999
4000 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07004001 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07004002 spin_unlock(&tp->lock);
4003}
4004
Michael Chan1b2a7202006-08-07 21:46:02 -07004005static inline u32 tg3_tx_avail(struct tg3 *tp)
4006{
4007 smp_mb();
4008 return (tp->tx_pending -
4009 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4010}
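
The arithmetic in tg3_tx_avail() above depends on TG3_TX_RING_SIZE being a power of two: the producer/consumer difference is masked to the ring size, so the in-flight count stays correct even after the producer index has wrapped around the ring and is numerically smaller than the consumer index. The standalone sketch below only illustrates that idiom; demo_tx_avail(), DEMO_TX_RING_SIZE and the values in main() are hypothetical stand-ins, not driver code.

/* Minimal sketch of the tg3_tx_avail() index math, assuming a
 * power-of-two ring size.  Hypothetical names; not part of the driver.
 */
#include <stdio.h>

#define DEMO_TX_RING_SIZE	512u	/* stand-in for TG3_TX_RING_SIZE */

static unsigned int demo_tx_avail(unsigned int pending,
				  unsigned int prod, unsigned int cons)
{
	/* Descriptors currently in flight, modulo the ring size. */
	unsigned int in_flight = (prod - cons) & (DEMO_TX_RING_SIZE - 1);

	return pending - in_flight;
}

int main(void)
{
	/* The producer has wrapped around the ring (prod < cons numerically);
	 * the mask still yields 17 in-flight entries, leaving 494 free.
	 */
	printf("%u\n", demo_tx_avail(511, 5, 500));
	return 0;
}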
4011
Linus Torvalds1da177e2005-04-16 15:20:36 -07004012/* Tigon3 never reports partial packet sends. So we do not
4013 * need special logic to handle SKBs that have not had all
4014 * of their frags sent yet, like SunGEM does.
4015 */
4016static void tg3_tx(struct tg3 *tp)
4017{
4018 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
4019 u32 sw_idx = tp->tx_cons;
4020
4021 while (sw_idx != hw_idx) {
4022 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
4023 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07004024 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025
Michael Chandf3e6542006-05-26 17:48:07 -07004026 if (unlikely(skb == NULL)) {
4027 tg3_tx_recover(tp);
4028 return;
4029 }
4030
David S. Miller90079ce2008-09-11 04:52:51 -07004031 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032
4033 ri->skb = NULL;
4034
4035 sw_idx = NEXT_TX(sw_idx);
4036
4037 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07004039 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4040 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041 sw_idx = NEXT_TX(sw_idx);
4042 }
4043
David S. Millerf47c11e2005-06-24 20:18:35 -07004044 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07004045
4046 if (unlikely(tx_bug)) {
4047 tg3_tx_recover(tp);
4048 return;
4049 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050 }
4051
4052 tp->tx_cons = sw_idx;
4053
Michael Chan1b2a7202006-08-07 21:46:02 -07004054 /* Need to make the tx_cons update visible to tg3_start_xmit()
4055 * before checking for netif_queue_stopped(). Without the
4056 * memory barrier, there is a small possibility that tg3_start_xmit()
4057 * will miss it and cause the queue to be stopped forever.
4058 */
4059 smp_mb();
4060
4061 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004062 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07004063 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004064 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004065 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07004066 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07004067 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004068 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069}
4070
4071/* Returns size of skb allocated or < 0 on error.
4072 *
4073 * We only need to fill in the address because the other members
4074 * of the RX descriptor are invariant, see tg3_init_rings.
4075 *
4076 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4077 * posting buffers we only dirty the first cache line of the RX
4078 * descriptor (containing the address). Whereas for the RX status
4079 * buffers the cpu only reads the last cacheline of the RX descriptor
4080 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4081 */
4082static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4083 int src_idx, u32 dest_idx_unmasked)
4084{
4085 struct tg3_rx_buffer_desc *desc;
4086 struct ring_info *map, *src_map;
4087 struct sk_buff *skb;
4088 dma_addr_t mapping;
4089 int skb_size, dest_idx;
4090
4091 src_map = NULL;
4092 switch (opaque_key) {
4093 case RXD_OPAQUE_RING_STD:
4094 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4095 desc = &tp->rx_std[dest_idx];
4096 map = &tp->rx_std_buffers[dest_idx];
4097 if (src_idx >= 0)
4098 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07004099 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 break;
4101
4102 case RXD_OPAQUE_RING_JUMBO:
4103 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4104 desc = &tp->rx_jumbo[dest_idx];
4105 map = &tp->rx_jumbo_buffers[dest_idx];
4106 if (src_idx >= 0)
4107 src_map = &tp->rx_jumbo_buffers[src_idx];
4108 skb_size = RX_JUMBO_PKT_BUF_SZ;
4109 break;
4110
4111 default:
4112 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114
4115 /* Do not overwrite any of the map or rp information
4116 * until we are sure we can commit to a new buffer.
4117 *
4118 * Callers depend upon this behavior and assume that
4119 * we leave everything unchanged if we fail.
4120 */
David S. Millera20e9c62006-07-31 22:38:16 -07004121 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122 if (skb == NULL)
4123 return -ENOMEM;
4124
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125 skb_reserve(skb, tp->rx_offset);
4126
4127 mapping = pci_map_single(tp->pdev, skb->data,
4128 skb_size - tp->rx_offset,
4129 PCI_DMA_FROMDEVICE);
4130
4131 map->skb = skb;
4132 pci_unmap_addr_set(map, mapping, mapping);
4133
4134 if (src_map != NULL)
4135 src_map->skb = NULL;
4136
4137 desc->addr_hi = ((u64)mapping >> 32);
4138 desc->addr_lo = ((u64)mapping & 0xffffffff);
4139
4140 return skb_size;
4141}
4142
4143/* We only need to move over in the address because the other
4144 * members of the RX descriptor are invariant. See notes above
4145 * tg3_alloc_rx_skb for full details.
4146 */
4147static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4148 int src_idx, u32 dest_idx_unmasked)
4149{
4150 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4151 struct ring_info *src_map, *dest_map;
4152 int dest_idx;
4153
4154 switch (opaque_key) {
4155 case RXD_OPAQUE_RING_STD:
4156 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4157 dest_desc = &tp->rx_std[dest_idx];
4158 dest_map = &tp->rx_std_buffers[dest_idx];
4159 src_desc = &tp->rx_std[src_idx];
4160 src_map = &tp->rx_std_buffers[src_idx];
4161 break;
4162
4163 case RXD_OPAQUE_RING_JUMBO:
4164 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4165 dest_desc = &tp->rx_jumbo[dest_idx];
4166 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4167 src_desc = &tp->rx_jumbo[src_idx];
4168 src_map = &tp->rx_jumbo_buffers[src_idx];
4169 break;
4170
4171 default:
4172 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004173 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174
4175 dest_map->skb = src_map->skb;
4176 pci_unmap_addr_set(dest_map, mapping,
4177 pci_unmap_addr(src_map, mapping));
4178 dest_desc->addr_hi = src_desc->addr_hi;
4179 dest_desc->addr_lo = src_desc->addr_lo;
4180
4181 src_map->skb = NULL;
4182}
4183
4184#if TG3_VLAN_TAG_USED
4185static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4186{
4187 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4188}
4189#endif
4190
4191/* The RX ring scheme is composed of multiple rings which post fresh
4192 * buffers to the chip, and one special ring the chip uses to report
4193 * status back to the host.
4194 *
4195 * The special ring reports the status of received packets to the
4196 * host. The chip does not write into the original descriptor the
4197 * RX buffer was obtained from. The chip simply takes the original
4198 * descriptor as provided by the host, updates the status and length
4199 * field, then writes this into the next status ring entry.
4200 *
4201 * Each ring the host uses to post buffers to the chip is described
4202 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4203 * it is first placed into the on-chip ram. When the packet's length
4204 * is known, it walks down the TG3_BDINFO entries to select the ring.
4205 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4206 * whose MAXLEN covers the new packet's length is chosen.
4207 *
4208 * The "separate ring for rx status" scheme may sound queer, but it makes
4209 * sense from a cache coherency perspective. If only the host writes
4210 * to the buffer post rings, and only the chip writes to the rx status
4211 * rings, then cache lines never move beyond shared-modified state.
4212 * If both the host and chip were to write into the same ring, cache line
4213 * eviction could occur since both entities want it in an exclusive state.
4214 */
4215static int tg3_rx(struct tg3 *tp, int budget)
4216{
Michael Chanf92905d2006-06-29 20:14:29 -07004217 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004218 u32 sw_idx = tp->rx_rcb_ptr;
4219 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 int received;
4221
4222 hw_idx = tp->hw_status->idx[0].rx_producer;
4223 /*
4224 * We need to order the read of hw_idx and the read of
4225 * the opaque cookie.
4226 */
4227 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 work_mask = 0;
4229 received = 0;
4230 while (sw_idx != hw_idx && budget > 0) {
4231 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4232 unsigned int len;
4233 struct sk_buff *skb;
4234 dma_addr_t dma_addr;
4235 u32 opaque_key, desc_idx, *post_ptr;
4236
4237 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4238 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4239 if (opaque_key == RXD_OPAQUE_RING_STD) {
4240 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4241 mapping);
4242 skb = tp->rx_std_buffers[desc_idx].skb;
4243 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004244 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4246 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4247 mapping);
4248 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4249 post_ptr = &tp->rx_jumbo_ptr;
4250 }
4251 else {
4252 goto next_pkt_nopost;
4253 }
4254
4255 work_mask |= opaque_key;
4256
4257 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4258 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4259 drop_it:
4260 tg3_recycle_rx(tp, opaque_key,
4261 desc_idx, *post_ptr);
4262 drop_it_no_recycle:
4263 /* Other statistics kept track of by card. */
4264 tp->net_stats.rx_dropped++;
4265 goto next_pkt;
4266 }
4267
Matt Carlsonad829262008-11-21 17:16:16 -08004268 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4269 ETH_FCS_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004271 if (len > RX_COPY_THRESHOLD
Matt Carlsonad829262008-11-21 17:16:16 -08004272 && tp->rx_offset == NET_IP_ALIGN
4273 /* rx_offset will likely not equal NET_IP_ALIGN
4274 * if this is a 5701 card running in PCI-X mode
4275 * [see tg3_get_invariants()]
4276 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277 ) {
4278 int skb_size;
4279
4280 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4281 desc_idx, *post_ptr);
4282 if (skb_size < 0)
4283 goto drop_it;
4284
4285 pci_unmap_single(tp->pdev, dma_addr,
4286 skb_size - tp->rx_offset,
4287 PCI_DMA_FROMDEVICE);
4288
4289 skb_put(skb, len);
4290 } else {
4291 struct sk_buff *copy_skb;
4292
4293 tg3_recycle_rx(tp, opaque_key,
4294 desc_idx, *post_ptr);
4295
Matt Carlsonad829262008-11-21 17:16:16 -08004296 copy_skb = netdev_alloc_skb(tp->dev,
4297 len + TG3_RAW_IP_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 if (copy_skb == NULL)
4299 goto drop_it_no_recycle;
4300
Matt Carlsonad829262008-11-21 17:16:16 -08004301 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 skb_put(copy_skb, len);
4303 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004304 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4306
4307 /* We'll reuse the original ring buffer. */
4308 skb = copy_skb;
4309 }
4310
4311 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4312 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4313 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4314 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4315 skb->ip_summed = CHECKSUM_UNNECESSARY;
4316 else
4317 skb->ip_summed = CHECKSUM_NONE;
4318
4319 skb->protocol = eth_type_trans(skb, tp->dev);
4320#if TG3_VLAN_TAG_USED
4321 if (tp->vlgrp != NULL &&
4322 desc->type_flags & RXD_FLAG_VLAN) {
4323 tg3_vlan_rx(tp, skb,
4324 desc->err_vlan & RXD_VLAN_MASK);
4325 } else
4326#endif
4327 netif_receive_skb(skb);
4328
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 received++;
4330 budget--;
4331
4332next_pkt:
4333 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004334
4335 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4336 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4337
4338 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4339 TG3_64BIT_REG_LOW, idx);
4340 work_mask &= ~RXD_OPAQUE_RING_STD;
4341 rx_std_posted = 0;
4342 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004344 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004345 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004346
4347 /* Refresh hw_idx to see if there is new work */
4348 if (sw_idx == hw_idx) {
4349 hw_idx = tp->hw_status->idx[0].rx_producer;
4350 rmb();
4351 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352 }
4353
4354 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004355 tp->rx_rcb_ptr = sw_idx;
4356 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357
4358 /* Refill RX ring(s). */
4359 if (work_mask & RXD_OPAQUE_RING_STD) {
4360 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4361 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4362 sw_idx);
4363 }
4364 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4365 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4366 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4367 sw_idx);
4368 }
4369 mmiowb();
4370
4371 return received;
4372}
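
The block comment above tg3_rx() explains that the chip selects a producer ring by walking the TG3_BDINFO entries and taking the first one whose MAXLEN covers the packet length, which is why the receive path above only ever sees the standard and jumbo opaque keys. Below is a minimal sketch of that selection rule, assuming a hypothetical 1536-byte MAXLEN for the standard ring (the real values are whatever the driver programs into the TG3_BDINFO entries).

/* Minimal sketch of the MAXLEN-based ring selection described above
 * tg3_rx().  demo_* names and the 1536-byte threshold are hypothetical.
 */
#include <stdio.h>

enum demo_rx_ring { DEMO_RX_RING_STD, DEMO_RX_RING_JUMBO };

static enum demo_rx_ring demo_select_rx_ring(unsigned int pkt_len)
{
	const unsigned int demo_std_maxlen = 1536;	/* standard ring MAXLEN */

	if (pkt_len <= demo_std_maxlen)
		return DEMO_RX_RING_STD;	/* first BDINFO covering the length */

	return DEMO_RX_RING_JUMBO;
}

int main(void)
{
	printf("%d %d\n", demo_select_rx_ring(60), demo_select_rx_ring(9000));
	return 0;
}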
4373
David S. Miller6f535762007-10-11 18:08:29 -07004374static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378 /* handle link change and other phy events */
4379 if (!(tp->tg3_flags &
4380 (TG3_FLAG_USE_LINKCHG_REG |
4381 TG3_FLAG_POLL_SERDES))) {
4382 if (sblk->status & SD_STATUS_LINK_CHG) {
4383 sblk->status = SD_STATUS_UPDATED |
4384 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004385 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004386 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4387 tw32_f(MAC_STATUS,
4388 (MAC_STATUS_SYNC_CHANGED |
4389 MAC_STATUS_CFG_CHANGED |
4390 MAC_STATUS_MI_COMPLETION |
4391 MAC_STATUS_LNKSTATE_CHANGED));
4392 udelay(40);
4393 } else
4394 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004395 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396 }
4397 }
4398
4399 /* run TX completion thread */
4400 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004402 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004403 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 }
4405
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406 /* run RX thread, within the bounds set by NAPI.
4407 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004408 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004410 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004411 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412
David S. Miller6f535762007-10-11 18:08:29 -07004413 return work_done;
4414}
David S. Millerf7383c22005-05-18 22:50:53 -07004415
David S. Miller6f535762007-10-11 18:08:29 -07004416static int tg3_poll(struct napi_struct *napi, int budget)
4417{
4418 struct tg3 *tp = container_of(napi, struct tg3, napi);
4419 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004420 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004421
4422 while (1) {
4423 work_done = tg3_poll_work(tp, work_done, budget);
4424
4425 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4426 goto tx_recovery;
4427
4428 if (unlikely(work_done >= budget))
4429 break;
4430
Michael Chan4fd7ab52007-10-12 01:39:50 -07004431 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4432 /* tp->last_tag is used in tg3_restart_ints() below
4433 * to tell the hw how much work has been processed,
4434 * so we must read it before checking for more work.
4435 */
4436 tp->last_tag = sblk->status_tag;
4437 rmb();
4438 } else
4439 sblk->status &= ~SD_STATUS_UPDATED;
4440
David S. Miller6f535762007-10-11 18:08:29 -07004441 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004442 netif_rx_complete(tp->dev, napi);
4443 tg3_restart_ints(tp);
4444 break;
4445 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004446 }
4447
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004448 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004449
4450tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004451 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004452 netif_rx_complete(tp->dev, napi);
4453 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004454 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455}
4456
David S. Millerf47c11e2005-06-24 20:18:35 -07004457static void tg3_irq_quiesce(struct tg3 *tp)
4458{
4459 BUG_ON(tp->irq_sync);
4460
4461 tp->irq_sync = 1;
4462 smp_mb();
4463
4464 synchronize_irq(tp->pdev->irq);
4465}
4466
4467static inline int tg3_irq_sync(struct tg3 *tp)
4468{
4469 return tp->irq_sync;
4470}
4471
4472/* Fully shut down all tg3 driver activity elsewhere in the system.
4473 * If irq_sync is non-zero, then the IRQ handler must be quiesced
4474 * as well. Most of the time, this is not necessary except when
4475 * shutting down the device.
4476 */
4477static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4478{
Michael Chan46966542007-07-11 19:47:19 -07004479 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004480 if (irq_sync)
4481 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004482}
4483
4484static inline void tg3_full_unlock(struct tg3 *tp)
4485{
David S. Millerf47c11e2005-06-24 20:18:35 -07004486 spin_unlock_bh(&tp->lock);
4487}
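
For orientation, a hedged usage sketch of the two helpers above: tg3_do_reconfig() is a hypothetical caller, not a function in this driver. Passing a non-zero irq_sync makes tg3_full_lock() quiesce the interrupt handler through tg3_irq_quiesce() before the caller modifies state that the handler also reads.

/* Hypothetical caller (not in the driver) showing the intended pairing
 * of tg3_full_lock()/tg3_full_unlock() when the IRQ handler must also
 * be kept quiet for the duration of the update.
 */
static void tg3_do_reconfig(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);		/* irq_sync != 0: also quiesces the IRQ */

	/* ... reprogram rings, tp->link_config, etc. here ... */

	tg3_full_unlock(tp);
}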
4488
Michael Chanfcfa0a32006-03-20 22:28:41 -08004489/* One-shot MSI handler - Chip automatically disables the interrupt
4490 * after sending the MSI, so the driver doesn't have to do it.
4491 */
David Howells7d12e782006-10-05 14:55:46 +01004492static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004493{
4494 struct net_device *dev = dev_id;
4495 struct tg3 *tp = netdev_priv(dev);
4496
4497 prefetch(tp->hw_status);
4498 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4499
4500 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004501 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004502
4503 return IRQ_HANDLED;
4504}
4505
Michael Chan88b06bc2005-04-21 17:13:25 -07004506/* MSI ISR - No need to check for interrupt sharing and no need to
4507 * flush status block and interrupt mailbox. PCI ordering rules
4508 * guarantee that MSI will arrive after the status block.
4509 */
David Howells7d12e782006-10-05 14:55:46 +01004510static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004511{
4512 struct net_device *dev = dev_id;
4513 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004514
Michael Chan61487482005-09-05 17:53:19 -07004515 prefetch(tp->hw_status);
4516 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004517 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004518 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004519 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004520	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004521 * NIC to stop sending us irqs, engaging "in-intr-handler"
4522 * event coalescing.
4523 */
4524 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004525 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004526 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004527
Michael Chan88b06bc2005-04-21 17:13:25 -07004528 return IRQ_RETVAL(1);
4529}
4530
David Howells7d12e782006-10-05 14:55:46 +01004531static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004532{
4533 struct net_device *dev = dev_id;
4534 struct tg3 *tp = netdev_priv(dev);
4535 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536 unsigned int handled = 1;
4537
Linus Torvalds1da177e2005-04-16 15:20:36 -07004538 /* In INTx mode, it is possible for the interrupt to arrive at
4539 * the CPU before the status block posted prior to the interrupt.
4540 * Reading the PCI State register will confirm whether the
4541 * interrupt is ours and will flush the status block.
4542 */
Michael Chand18edcb2007-03-24 20:57:11 -07004543 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4544 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4545 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4546 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004547 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004548 }
Michael Chand18edcb2007-03-24 20:57:11 -07004549 }
4550
4551 /*
4552 * Writing any value to intr-mbox-0 clears PCI INTA# and
4553 * chip-internal interrupt pending events.
4554	 * Writing non-zero to intr-mbox-0 additionally tells the
4555 * NIC to stop sending us irqs, engaging "in-intr-handler"
4556 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004557 *
4558 * Flush the mailbox to de-assert the IRQ immediately to prevent
4559 * spurious interrupts. The flush impacts performance but
4560 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004561 */
Michael Chanc04cb342007-05-07 00:26:15 -07004562 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004563 if (tg3_irq_sync(tp))
4564 goto out;
4565 sblk->status &= ~SD_STATUS_UPDATED;
4566 if (likely(tg3_has_work(tp))) {
4567 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004568 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004569 } else {
4570 /* No work, shared interrupt perhaps? re-enable
4571 * interrupts, and flush that PCI write
4572 */
4573 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4574 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004575 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004576out:
David S. Millerfac9b832005-05-18 22:46:34 -07004577 return IRQ_RETVAL(handled);
4578}
4579
David Howells7d12e782006-10-05 14:55:46 +01004580static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004581{
4582 struct net_device *dev = dev_id;
4583 struct tg3 *tp = netdev_priv(dev);
4584 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004585 unsigned int handled = 1;
4586
David S. Millerfac9b832005-05-18 22:46:34 -07004587 /* In INTx mode, it is possible for the interrupt to arrive at
4588 * the CPU before the status block posted prior to the interrupt.
4589 * Reading the PCI State register will confirm whether the
4590 * interrupt is ours and will flush the status block.
4591 */
Michael Chand18edcb2007-03-24 20:57:11 -07004592 if (unlikely(sblk->status_tag == tp->last_tag)) {
4593 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4594 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4595 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004596 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 }
Michael Chand18edcb2007-03-24 20:57:11 -07004598 }
4599
4600 /*
4601	 * Writing any value to intr-mbox-0 clears PCI INTA# and
4602	 * chip-internal interrupt pending events.
4603	 * Writing non-zero to intr-mbox-0 additionally tells the
4604 * NIC to stop sending us irqs, engaging "in-intr-handler"
4605 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004606 *
4607 * Flush the mailbox to de-assert the IRQ immediately to prevent
4608 * spurious interrupts. The flush impacts performance but
4609 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004610 */
Michael Chanc04cb342007-05-07 00:26:15 -07004611 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004612 if (tg3_irq_sync(tp))
4613 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004614 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004615 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4616 /* Update last_tag to mark that this status has been
4617 * seen. Because interrupt may be shared, we may be
4618 * racing with tg3_poll(), so only update last_tag
4619 * if tg3_poll() is not scheduled.
4620 */
4621 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004622 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004624out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 return IRQ_RETVAL(handled);
4626}
4627
Michael Chan79381092005-04-21 17:13:59 -07004628/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004629static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004630{
4631 struct net_device *dev = dev_id;
4632 struct tg3 *tp = netdev_priv(dev);
4633 struct tg3_hw_status *sblk = tp->hw_status;
4634
Michael Chanf9804dd2005-09-27 12:13:10 -07004635 if ((sblk->status & SD_STATUS_UPDATED) ||
4636 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004637 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004638 return IRQ_RETVAL(1);
4639 }
4640 return IRQ_RETVAL(0);
4641}
4642
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004643static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004644static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645
Michael Chanb9ec6c12006-07-25 16:37:27 -07004646/* Restart hardware after configuration changes, self-test, etc.
4647 * Invoked with tp->lock held.
4648 */
4649static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004650 __releases(tp->lock)
4651 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004652{
4653 int err;
4654
4655 err = tg3_init_hw(tp, reset_phy);
4656 if (err) {
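		/* The error path below drops tp->lock around del_timer_sync()
		 * and dev_close(), which can sleep and re-enter the driver,
		 * and re-takes it before returning (hence the __releases/
		 * __acquires annotations on this function).
		 */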
4657 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4658 "aborting.\n", tp->dev->name);
4659 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4660 tg3_full_unlock(tp);
4661 del_timer_sync(&tp->timer);
4662 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004663 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004664 dev_close(tp->dev);
4665 tg3_full_lock(tp, 0);
4666 }
4667 return err;
4668}
4669
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670#ifdef CONFIG_NET_POLL_CONTROLLER
4671static void tg3_poll_controller(struct net_device *dev)
4672{
Michael Chan88b06bc2005-04-21 17:13:25 -07004673 struct tg3 *tp = netdev_priv(dev);
4674
David Howells7d12e782006-10-05 14:55:46 +01004675 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676}
4677#endif
4678
David Howellsc4028952006-11-22 14:57:56 +00004679static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680{
David Howellsc4028952006-11-22 14:57:56 +00004681 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004682 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004683 unsigned int restart_timer;
4684
Michael Chan7faa0062006-02-02 17:29:28 -08004685 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004686
4687 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004688 tg3_full_unlock(tp);
4689 return;
4690 }
4691
4692 tg3_full_unlock(tp);
4693
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004694 tg3_phy_stop(tp);
4695
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 tg3_netif_stop(tp);
4697
David S. Millerf47c11e2005-06-24 20:18:35 -07004698 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699
4700 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4701 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4702
Michael Chandf3e6542006-05-26 17:48:07 -07004703 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4704 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4705 tp->write32_rx_mbox = tg3_write_flush_reg32;
4706 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4707 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4708 }
4709
Michael Chan944d9802005-05-29 14:57:48 -07004710 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004711 err = tg3_init_hw(tp, 1);
4712 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004713 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004714
4715 tg3_netif_start(tp);
4716
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 if (restart_timer)
4718 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004719
Michael Chanb9ec6c12006-07-25 16:37:27 -07004720out:
Michael Chan7faa0062006-02-02 17:29:28 -08004721 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004722
4723 if (!err)
4724 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725}
4726
Michael Chanb0408752007-02-13 12:18:30 -08004727static void tg3_dump_short_state(struct tg3 *tp)
4728{
4729 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4730 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4731 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4732 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4733}
4734
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735static void tg3_tx_timeout(struct net_device *dev)
4736{
4737 struct tg3 *tp = netdev_priv(dev);
4738
Michael Chanb0408752007-02-13 12:18:30 -08004739 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004740 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4741 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004742 tg3_dump_short_state(tp);
4743 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744
4745 schedule_work(&tp->reset_task);
4746}
4747
Michael Chanc58ec932005-09-17 00:46:27 -07004748/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4749static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4750{
4751 u32 base = (u32) mapping & 0xffffffff;
4752
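	/* The hardware mis-handles a DMA that crosses a 4GB boundary: flag
	 * the buffer when its low 32 bits sit close enough to the boundary
	 * that base + len (plus a small guard) wraps the 32-bit value.
	 */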
4753 return ((base > 0xffffdcc0) &&
4754 (base + len + 8 < base));
4755}
4756
Michael Chan72f2afb2006-03-06 19:28:35 -08004757/* Test for DMA addresses > 40-bit */
4758static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4759 int len)
4760{
4761#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004762 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004763 return (((u64) mapping + len) > DMA_40BIT_MASK);
4764 return 0;
4765#else
4766 return 0;
4767#endif
4768}
4769
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4771
Michael Chan72f2afb2006-03-06 19:28:35 -08004772/* Workaround 4GB and 40-bit hardware DMA bugs. */
4773static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004774 u32 last_plus_one, u32 *start,
4775 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004776{
Matt Carlson41588ba2008-04-19 18:12:33 -07004777 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004778 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004779 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004780 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781
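	/* Copy the whole frame into a freshly allocated linear skb; on 5701
	 * the data also has to start on a 4-byte boundary, so copy with
	 * enough extra headroom to realign it.
	 */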
Matt Carlson41588ba2008-04-19 18:12:33 -07004782 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4783 new_skb = skb_copy(skb, GFP_ATOMIC);
4784 else {
4785 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4786
4787 new_skb = skb_copy_expand(skb,
4788 skb_headroom(skb) + more_headroom,
4789 skb_tailroom(skb), GFP_ATOMIC);
4790 }
4791
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004793 ret = -1;
4794 } else {
4795 /* New SKB is guaranteed to be linear. */
4796 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004797 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4798 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4799
Michael Chanc58ec932005-09-17 00:46:27 -07004800 /* Make sure new skb does not cross any 4G boundaries.
4801 * Drop the packet if it does.
4802 */
David S. Miller90079ce2008-09-11 04:52:51 -07004803 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004804 if (!ret)
4805 skb_dma_unmap(&tp->pdev->dev, new_skb,
4806 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004807 ret = -1;
4808 dev_kfree_skb(new_skb);
4809 new_skb = NULL;
4810 } else {
4811 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4812 base_flags, 1 | (mss << 1));
4813 *start = NEXT_TX(entry);
4814 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004815 }
4816
Linus Torvalds1da177e2005-04-16 15:20:36 -07004817 /* Now clean up the sw ring entries. */
4818 i = 0;
4819 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004820 if (i == 0) {
4821 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822 } else {
4823 tp->tx_buffers[entry].skb = NULL;
4824 }
4825 entry = NEXT_TX(entry);
4826 i++;
4827 }
4828
David S. Miller90079ce2008-09-11 04:52:51 -07004829 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004830 dev_kfree_skb(skb);
4831
Michael Chanc58ec932005-09-17 00:46:27 -07004832 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833}
4834
4835static void tg3_set_txd(struct tg3 *tp, int entry,
4836 dma_addr_t mapping, int len, u32 flags,
4837 u32 mss_and_is_end)
4838{
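	/* mss_and_is_end packs two fields: bit 0 marks the final descriptor
	 * of the frame, the remaining bits carry the TSO MSS.
	 */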
4839 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4840 int is_end = (mss_and_is_end & 0x1);
4841 u32 mss = (mss_and_is_end >> 1);
4842 u32 vlan_tag = 0;
4843
4844 if (is_end)
4845 flags |= TXD_FLAG_END;
4846 if (flags & TXD_FLAG_VLAN) {
4847 vlan_tag = flags >> 16;
4848 flags &= 0xffff;
4849 }
4850 vlan_tag |= (mss << TXD_MSS_SHIFT);
4851
4852 txd->addr_hi = ((u64) mapping >> 32);
4853 txd->addr_lo = ((u64) mapping & 0xffffffff);
4854 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4855 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4856}
4857
Michael Chan5a6f3072006-03-20 22:28:05 -08004858/* hard_start_xmit for devices that don't have any bugs and
4859 * support TG3_FLG2_HW_TSO_2 only.
4860 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4862{
4863 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004865 struct skb_shared_info *sp;
4866 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004867
4868 len = skb_headlen(skb);
4869
Michael Chan00b70502006-06-17 21:58:45 -07004870 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004871 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004872 * interrupt. Furthermore, IRQ processing runs lockless so we have
4873 * no IRQ context deadlocks to worry about either. Rejoice!
4874 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004875 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004876 if (!netif_queue_stopped(dev)) {
4877 netif_stop_queue(dev);
4878
4879 /* This is a hard error, log it. */
4880 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4881 "queue awake!\n", dev->name);
4882 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004883 return NETDEV_TX_BUSY;
4884 }
4885
4886 entry = tp->tx_prod;
4887 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004888 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004889 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004890 int tcp_opt_len, ip_tcp_len;
4891
4892 if (skb_header_cloned(skb) &&
4893 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4894 dev_kfree_skb(skb);
4895 goto out_unlock;
4896 }
4897
Michael Chanb0026622006-07-03 19:42:14 -07004898 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4899 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4900 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004901 struct iphdr *iph = ip_hdr(skb);
4902
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004903 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004904 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004905
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004906 iph->check = 0;
4907 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004908 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4909 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004910
4911 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4912 TXD_FLAG_CPU_POST_DMA);
4913
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004914 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004915
Michael Chan5a6f3072006-03-20 22:28:05 -08004916 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004917 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004918 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004919#if TG3_VLAN_TAG_USED
4920 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4921 base_flags |= (TXD_FLAG_VLAN |
4922 (vlan_tx_tag_get(skb) << 16));
4923#endif
4924
David S. Miller90079ce2008-09-11 04:52:51 -07004925 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4926 dev_kfree_skb(skb);
4927 goto out_unlock;
4928 }
4929
4930 sp = skb_shinfo(skb);
4931
4932 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004933
4934 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004935
4936 tg3_set_txd(tp, entry, mapping, len, base_flags,
4937 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4938
4939 entry = NEXT_TX(entry);
4940
4941 /* Now loop through additional data fragments, and queue them. */
4942 if (skb_shinfo(skb)->nr_frags > 0) {
4943 unsigned int i, last;
4944
4945 last = skb_shinfo(skb)->nr_frags - 1;
4946 for (i = 0; i <= last; i++) {
4947 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4948
4949 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004950 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004951 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004952
4953 tg3_set_txd(tp, entry, mapping, len,
4954 base_flags, (i == last) | (mss << 1));
4955
4956 entry = NEXT_TX(entry);
4957 }
4958 }
4959
4960 /* Packets are ready, update Tx producer idx local and on card. */
4961 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4962
4963 tp->tx_prod = entry;
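	/* If a maximally fragmented skb can no longer fit, stop the queue,
	 * then re-check against the wakeup threshold in case the completion
	 * path freed descriptors in the meantime.
	 */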
Michael Chan1b2a7202006-08-07 21:46:02 -07004964 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004965 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004966 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004967 netif_wake_queue(tp->dev);
4968 }
4969
4970out_unlock:
4971 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004972
4973 dev->trans_start = jiffies;
4974
4975 return NETDEV_TX_OK;
4976}
4977
Michael Chan52c0fd82006-06-29 20:15:54 -07004978static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4979
4980/* Use GSO to workaround a rare TSO bug that may be triggered when the
4981 * TSO header is greater than 80 bytes.
4982 */
4983static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4984{
4985 struct sk_buff *segs, *nskb;
4986
4987 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004988 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004989 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004990 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4991 return NETDEV_TX_BUSY;
4992
4993 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004994 }
4995
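	/* Segment the oversized TSO frame in software (TSO masked out of the
	 * feature set), then queue each resulting skb on the normal path.
	 */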
4996 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004997 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004998 goto tg3_tso_bug_end;
4999
5000 do {
5001 nskb = segs;
5002 segs = segs->next;
5003 nskb->next = NULL;
5004 tg3_start_xmit_dma_bug(nskb, tp->dev);
5005 } while (segs);
5006
5007tg3_tso_bug_end:
5008 dev_kfree_skb(skb);
5009
5010 return NETDEV_TX_OK;
5011}
Michael Chan52c0fd82006-06-29 20:15:54 -07005012
Michael Chan5a6f3072006-03-20 22:28:05 -08005013/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5014 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5015 */
5016static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5017{
5018 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08005019 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07005020 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005021 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07005022 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023
5024 len = skb_headlen(skb);
5025
Michael Chan00b70502006-06-17 21:58:45 -07005026 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005027 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07005028 * interrupt. Furthermore, IRQ processing runs lockless so we have
5029 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030 */
Michael Chan1b2a7202006-08-07 21:46:02 -07005031 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08005032 if (!netif_queue_stopped(dev)) {
5033 netif_stop_queue(dev);
5034
5035 /* This is a hard error, log it. */
5036 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5037 "queue awake!\n", dev->name);
5038 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 return NETDEV_TX_BUSY;
5040 }
5041
5042 entry = tp->tx_prod;
5043 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005044 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07005047 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005048 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07005049 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050
5051 if (skb_header_cloned(skb) &&
5052 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5053 dev_kfree_skb(skb);
5054 goto out_unlock;
5055 }
5056
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005057 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03005058 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059
Michael Chan52c0fd82006-06-29 20:15:54 -07005060 hdr_len = ip_tcp_len + tcp_opt_len;
5061 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08005062 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07005063 return (tg3_tso_bug(tp, skb));
5064
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5066 TXD_FLAG_CPU_POST_DMA);
5067
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005068 iph = ip_hdr(skb);
5069 iph->check = 0;
5070 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005072 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005074 } else
5075 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5076 iph->daddr, 0,
5077 IPPROTO_TCP,
5078 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005079
5080 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5081 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005082 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083 int tsflags;
5084
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005085 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086 mss |= (tsflags << 11);
5087 }
5088 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005089 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090 int tsflags;
5091
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005092 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093 base_flags |= tsflags << 12;
5094 }
5095 }
5096 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097#if TG3_VLAN_TAG_USED
5098 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5099 base_flags |= (TXD_FLAG_VLAN |
5100 (vlan_tx_tag_get(skb) << 16));
5101#endif
5102
David S. Miller90079ce2008-09-11 04:52:51 -07005103 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5104 dev_kfree_skb(skb);
5105 goto out_unlock;
5106 }
5107
5108 sp = skb_shinfo(skb);
5109
5110 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111
5112 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113
5114 would_hit_hwbug = 0;
5115
Matt Carlson41588ba2008-04-19 18:12:33 -07005116 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5117 would_hit_hwbug = 1;
5118 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005119 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120
5121 tg3_set_txd(tp, entry, mapping, len, base_flags,
5122 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5123
5124 entry = NEXT_TX(entry);
5125
5126 /* Now loop through additional data fragments, and queue them. */
5127 if (skb_shinfo(skb)->nr_frags > 0) {
5128 unsigned int i, last;
5129
5130 last = skb_shinfo(skb)->nr_frags - 1;
5131 for (i = 0; i <= last; i++) {
5132 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5133
5134 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005135 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005136
5137 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138
Michael Chanc58ec932005-09-17 00:46:27 -07005139 if (tg3_4g_overflow_test(mapping, len))
5140 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141
Michael Chan72f2afb2006-03-06 19:28:35 -08005142 if (tg3_40bit_overflow_test(tp, mapping, len))
5143 would_hit_hwbug = 1;
5144
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5146 tg3_set_txd(tp, entry, mapping, len,
5147 base_flags, (i == last)|(mss << 1));
5148 else
5149 tg3_set_txd(tp, entry, mapping, len,
5150 base_flags, (i == last));
5151
5152 entry = NEXT_TX(entry);
5153 }
5154 }
5155
5156 if (would_hit_hwbug) {
5157 u32 last_plus_one = entry;
5158 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159
Michael Chanc58ec932005-09-17 00:46:27 -07005160 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5161 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162
5163 /* If the workaround fails due to memory/mapping
5164 * failure, silently drop this packet.
5165 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005166 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005167 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 goto out_unlock;
5169
5170 entry = start;
5171 }
5172
5173 /* Packets are ready, update Tx producer idx local and on card. */
5174 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5175
5176 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005177 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005179 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005180 netif_wake_queue(tp->dev);
5181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182
5183out_unlock:
5184 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185
5186 dev->trans_start = jiffies;
5187
5188 return NETDEV_TX_OK;
5189}
5190
5191static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5192 int new_mtu)
5193{
5194 dev->mtu = new_mtu;
5195
Michael Chanef7f5ec2005-07-25 12:32:25 -07005196 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005197 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005198 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5199 ethtool_op_set_tso(dev, 0);
5200 }
5201 else
5202 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5203 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005204 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005205 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005206 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005207 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208}
5209
5210static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5211{
5212 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005213 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214
5215 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5216 return -EINVAL;
5217
5218 if (!netif_running(dev)) {
5219 /* We'll just catch it later when the
5220 * device is up'd.
5221 */
5222 tg3_set_mtu(dev, tp, new_mtu);
5223 return 0;
5224 }
5225
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005226 tg3_phy_stop(tp);
5227
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005229
5230 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231
Michael Chan944d9802005-05-29 14:57:48 -07005232 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233
5234 tg3_set_mtu(dev, tp, new_mtu);
5235
Michael Chanb9ec6c12006-07-25 16:37:27 -07005236 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237
Michael Chanb9ec6c12006-07-25 16:37:27 -07005238 if (!err)
5239 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240
David S. Millerf47c11e2005-06-24 20:18:35 -07005241 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005243 if (!err)
5244 tg3_phy_start(tp);
5245
Michael Chanb9ec6c12006-07-25 16:37:27 -07005246 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247}
5248
5249/* Free up pending packets in all rx/tx rings.
5250 *
5251 * The chip has been shut down and the driver detached from
5252 * the networking stack, so no interrupts or new tx packets will
5253 * end up in the driver. tp->{tx,}lock is not held and we are not
5254 * in an interrupt context and thus may sleep.
5255 */
5256static void tg3_free_rings(struct tg3 *tp)
5257{
5258 struct ring_info *rxp;
5259 int i;
5260
5261 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5262 rxp = &tp->rx_std_buffers[i];
5263
5264 if (rxp->skb == NULL)
5265 continue;
5266 pci_unmap_single(tp->pdev,
5267 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005268 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269 PCI_DMA_FROMDEVICE);
5270 dev_kfree_skb_any(rxp->skb);
5271 rxp->skb = NULL;
5272 }
5273
5274 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5275 rxp = &tp->rx_jumbo_buffers[i];
5276
5277 if (rxp->skb == NULL)
5278 continue;
5279 pci_unmap_single(tp->pdev,
5280 pci_unmap_addr(rxp, mapping),
5281 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5282 PCI_DMA_FROMDEVICE);
5283 dev_kfree_skb_any(rxp->skb);
5284 rxp->skb = NULL;
5285 }
5286
5287 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5288 struct tx_ring_info *txp;
5289 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005290
5291 txp = &tp->tx_buffers[i];
5292 skb = txp->skb;
5293
5294 if (skb == NULL) {
5295 i++;
5296 continue;
5297 }
5298
David S. Miller90079ce2008-09-11 04:52:51 -07005299 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5300
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301 txp->skb = NULL;
5302
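		/* Each skb owns one descriptor for its linear head plus one
		 * per fragment; skip past the whole group.
		 */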
David S. Miller90079ce2008-09-11 04:52:51 -07005303 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304
5305 dev_kfree_skb_any(skb);
5306 }
5307}
5308
5309/* Initialize tx/rx rings for packet processing.
5310 *
5311 * The chip has been shut down and the driver detached from
5312 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
5313 * end up in the driver. tp->{tx,}lock are held and thus
5314 * we may not sleep.
5315 */
Michael Chan32d8c572006-07-25 16:38:29 -07005316static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005317{
5318 u32 i;
5319
5320 /* Free up all the SKBs. */
5321 tg3_free_rings(tp);
5322
5323 /* Zero out all descriptors. */
5324 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5325 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5326 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5327 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5328
Michael Chan7e72aad2005-07-25 12:31:17 -07005329 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005330 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005331 (tp->dev->mtu > ETH_DATA_LEN))
5332 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5333
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334	/* Initialize invariants of the rings; we only set this
5335 * stuff once. This works because the card does not
5336 * write into the rx buffer posting rings.
5337 */
5338 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5339 struct tg3_rx_buffer_desc *rxd;
5340
5341 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005342 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343 << RXD_LEN_SHIFT;
5344 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5345 rxd->opaque = (RXD_OPAQUE_RING_STD |
5346 (i << RXD_OPAQUE_INDEX_SHIFT));
5347 }
5348
Michael Chan0f893dc2005-07-25 12:30:38 -07005349 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5351 struct tg3_rx_buffer_desc *rxd;
5352
5353 rxd = &tp->rx_jumbo[i];
5354 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5355 << RXD_LEN_SHIFT;
5356 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5357 RXD_FLAG_JUMBO;
5358 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5359 (i << RXD_OPAQUE_INDEX_SHIFT));
5360 }
5361 }
5362
5363 /* Now allocate fresh SKBs for each rx ring. */
5364 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005365 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5366 printk(KERN_WARNING PFX
5367 "%s: Using a smaller RX standard ring, "
5368 "only %d out of %d buffers were allocated "
5369 "successfully.\n",
5370 tp->dev->name, i, tp->rx_pending);
5371 if (i == 0)
5372 return -ENOMEM;
5373 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005375 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 }
5377
Michael Chan0f893dc2005-07-25 12:30:38 -07005378 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5380 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005381 -1, i) < 0) {
5382 printk(KERN_WARNING PFX
5383 "%s: Using a smaller RX jumbo ring, "
5384 "only %d out of %d buffers were "
5385 "allocated successfully.\n",
5386 tp->dev->name, i, tp->rx_jumbo_pending);
5387 if (i == 0) {
5388 tg3_free_rings(tp);
5389 return -ENOMEM;
5390 }
5391 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005393 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394 }
5395 }
Michael Chan32d8c572006-07-25 16:38:29 -07005396 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005397}
5398
5399/*
5400 * Must not be invoked with interrupt sources disabled and
5401 * the hardware shut down.
5402 */
5403static void tg3_free_consistent(struct tg3 *tp)
5404{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005405 kfree(tp->rx_std_buffers);
5406 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005407 if (tp->rx_std) {
5408 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5409 tp->rx_std, tp->rx_std_mapping);
5410 tp->rx_std = NULL;
5411 }
5412 if (tp->rx_jumbo) {
5413 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5414 tp->rx_jumbo, tp->rx_jumbo_mapping);
5415 tp->rx_jumbo = NULL;
5416 }
5417 if (tp->rx_rcb) {
5418 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5419 tp->rx_rcb, tp->rx_rcb_mapping);
5420 tp->rx_rcb = NULL;
5421 }
5422 if (tp->tx_ring) {
5423 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5424 tp->tx_ring, tp->tx_desc_mapping);
5425 tp->tx_ring = NULL;
5426 }
5427 if (tp->hw_status) {
5428 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5429 tp->hw_status, tp->status_mapping);
5430 tp->hw_status = NULL;
5431 }
5432 if (tp->hw_stats) {
5433 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5434 tp->hw_stats, tp->stats_mapping);
5435 tp->hw_stats = NULL;
5436 }
5437}
5438
5439/*
5440 * Must not be invoked with interrupt sources disabled and
5441 * the hardware shut down. Can sleep.
5442 */
5443static int tg3_alloc_consistent(struct tg3 *tp)
5444{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005445 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 (TG3_RX_RING_SIZE +
5447 TG3_RX_JUMBO_RING_SIZE)) +
5448 (sizeof(struct tx_ring_info) *
5449 TG3_TX_RING_SIZE),
5450 GFP_KERNEL);
5451 if (!tp->rx_std_buffers)
5452 return -ENOMEM;
5453
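	/* A single allocation backs the std rx, jumbo rx and tx bookkeeping
	 * arrays; the jumbo and tx pointers are carved out of it below.
	 */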
Linus Torvalds1da177e2005-04-16 15:20:36 -07005454 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5455 tp->tx_buffers = (struct tx_ring_info *)
5456 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5457
5458 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5459 &tp->rx_std_mapping);
5460 if (!tp->rx_std)
5461 goto err_out;
5462
5463 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5464 &tp->rx_jumbo_mapping);
5465
5466 if (!tp->rx_jumbo)
5467 goto err_out;
5468
5469 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5470 &tp->rx_rcb_mapping);
5471 if (!tp->rx_rcb)
5472 goto err_out;
5473
5474 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5475 &tp->tx_desc_mapping);
5476 if (!tp->tx_ring)
5477 goto err_out;
5478
5479 tp->hw_status = pci_alloc_consistent(tp->pdev,
5480 TG3_HW_STATUS_SIZE,
5481 &tp->status_mapping);
5482 if (!tp->hw_status)
5483 goto err_out;
5484
5485 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5486 sizeof(struct tg3_hw_stats),
5487 &tp->stats_mapping);
5488 if (!tp->hw_stats)
5489 goto err_out;
5490
5491 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5492 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5493
5494 return 0;
5495
5496err_out:
5497 tg3_free_consistent(tp);
5498 return -ENOMEM;
5499}
5500
5501#define MAX_WAIT_CNT 1000
5502
5503/* To stop a block, clear the enable bit and poll till it
5504 * clears. tp->lock is held.
5505 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005506static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005507{
5508 unsigned int i;
5509 u32 val;
5510
5511 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5512 switch (ofs) {
5513 case RCVLSC_MODE:
5514 case DMAC_MODE:
5515 case MBFREE_MODE:
5516 case BUFMGR_MODE:
5517 case MEMARB_MODE:
5518 /* We can't enable/disable these bits of the
5519 * 5705/5750, just say success.
5520 */
5521 return 0;
5522
5523 default:
5524 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005525 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005526 }
5527
5528 val = tr32(ofs);
5529 val &= ~enable_bit;
5530 tw32_f(ofs, val);
5531
5532 for (i = 0; i < MAX_WAIT_CNT; i++) {
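	/* Poll up to MAX_WAIT_CNT * 100us (100ms) for the enable bit to clear. */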
5533 udelay(100);
5534 val = tr32(ofs);
5535 if ((val & enable_bit) == 0)
5536 break;
5537 }
5538
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005539 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005540 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5541 "ofs=%lx enable_bit=%x\n",
5542 ofs, enable_bit);
5543 return -ENODEV;
5544 }
5545
5546 return 0;
5547}
5548
5549/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005550static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005551{
5552 int i, err;
5553
5554 tg3_disable_ints(tp);
5555
5556 tp->rx_mode &= ~RX_MODE_ENABLE;
5557 tw32_f(MAC_RX_MODE, tp->rx_mode);
5558 udelay(10);
5559
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005560 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5561 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5562 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5563 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5564 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5565 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005567 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5568 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5569 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5570 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5571 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5572 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5573 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005574
5575 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5576 tw32_f(MAC_MODE, tp->mac_mode);
5577 udelay(40);
5578
5579 tp->tx_mode &= ~TX_MODE_ENABLE;
5580 tw32_f(MAC_TX_MODE, tp->tx_mode);
5581
5582 for (i = 0; i < MAX_WAIT_CNT; i++) {
5583 udelay(100);
5584 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5585 break;
5586 }
5587 if (i >= MAX_WAIT_CNT) {
5588 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5589 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5590 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005591 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005592 }
5593
Michael Chane6de8ad2005-05-05 14:42:41 -07005594 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005595 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5596 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005597
5598 tw32(FTQ_RESET, 0xffffffff);
5599 tw32(FTQ_RESET, 0x00000000);
5600
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005601 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5602 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005603
5604 if (tp->hw_status)
5605 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5606 if (tp->hw_stats)
5607 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5608
Linus Torvalds1da177e2005-04-16 15:20:36 -07005609 return err;
5610}
5611
5612/* tp->lock is held. */
5613static int tg3_nvram_lock(struct tg3 *tp)
5614{
5615 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5616 int i;
5617
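		/* Request slot 1 of the NVRAM software arbitration register
		 * and poll for the matching grant bit, giving up after
		 * 8000 * 20us = 160ms.
		 */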
Michael Chanec41c7d2006-01-17 02:40:55 -08005618 if (tp->nvram_lock_cnt == 0) {
5619 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5620 for (i = 0; i < 8000; i++) {
5621 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5622 break;
5623 udelay(20);
5624 }
5625 if (i == 8000) {
5626 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5627 return -ENODEV;
5628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005630 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631 }
5632 return 0;
5633}
5634
5635/* tp->lock is held. */
5636static void tg3_nvram_unlock(struct tg3 *tp)
5637{
Michael Chanec41c7d2006-01-17 02:40:55 -08005638 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5639 if (tp->nvram_lock_cnt > 0)
5640 tp->nvram_lock_cnt--;
5641 if (tp->nvram_lock_cnt == 0)
5642 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5643 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005644}
5645
5646/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005647static void tg3_enable_nvram_access(struct tg3 *tp)
5648{
5649 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5650 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5651 u32 nvaccess = tr32(NVRAM_ACCESS);
5652
5653 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5654 }
5655}
5656
5657/* tp->lock is held. */
5658static void tg3_disable_nvram_access(struct tg3 *tp)
5659{
5660 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5661 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5662 u32 nvaccess = tr32(NVRAM_ACCESS);
5663
5664 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5665 }
5666}
5667
Matt Carlson0d3031d2007-10-10 18:02:43 -07005668static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5669{
5670 int i;
5671 u32 apedata;
5672
5673 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5674 if (apedata != APE_SEG_SIG_MAGIC)
5675 return;
5676
5677 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005678 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005679 return;
5680
5681 /* Wait for up to 1 millisecond for APE to service previous event. */
5682 for (i = 0; i < 10; i++) {
5683 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5684 return;
5685
5686 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5687
5688 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5689 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5690 event | APE_EVENT_STATUS_EVENT_PENDING);
5691
5692 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5693
5694 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5695 break;
5696
5697 udelay(100);
5698 }
5699
5700 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5701 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5702}
5703
5704static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5705{
5706 u32 event;
5707 u32 apedata;
5708
5709 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5710 return;
5711
5712 switch (kind) {
5713 case RESET_KIND_INIT:
5714 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5715 APE_HOST_SEG_SIG_MAGIC);
5716 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5717 APE_HOST_SEG_LEN_MAGIC);
5718 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5719 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5720 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5721 APE_HOST_DRIVER_ID_MAGIC);
5722 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5723 APE_HOST_BEHAV_NO_PHYLOCK);
5724
5725 event = APE_EVENT_STATUS_STATE_START;
5726 break;
5727 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005728 /* With the interface we are currently using,
5729 * APE does not track driver state. Wiping
5730 * out the HOST SEGMENT SIGNATURE forces
5731 * the APE to assume OS absent status.
5732 */
5733 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5734
Matt Carlson0d3031d2007-10-10 18:02:43 -07005735 event = APE_EVENT_STATUS_STATE_UNLOAD;
5736 break;
5737 case RESET_KIND_SUSPEND:
5738 event = APE_EVENT_STATUS_STATE_SUSPEND;
5739 break;
5740 default:
5741 return;
5742 }
5743
5744 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5745
5746 tg3_ape_send_event(tp, event);
5747}
5748
Michael Chane6af3012005-04-21 17:12:05 -07005749/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005750static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5751{
David S. Millerf49639e2006-06-09 11:58:36 -07005752 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5753 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005754
5755 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5756 switch (kind) {
5757 case RESET_KIND_INIT:
5758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5759 DRV_STATE_START);
5760 break;
5761
5762 case RESET_KIND_SHUTDOWN:
5763 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5764 DRV_STATE_UNLOAD);
5765 break;
5766
5767 case RESET_KIND_SUSPEND:
5768 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5769 DRV_STATE_SUSPEND);
5770 break;
5771
5772 default:
5773 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005774 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005775 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005776
5777 if (kind == RESET_KIND_INIT ||
5778 kind == RESET_KIND_SUSPEND)
5779 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780}
5781
5782/* tp->lock is held. */
5783static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5784{
5785 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5786 switch (kind) {
5787 case RESET_KIND_INIT:
5788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5789 DRV_STATE_START_DONE);
5790 break;
5791
5792 case RESET_KIND_SHUTDOWN:
5793 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5794 DRV_STATE_UNLOAD_DONE);
5795 break;
5796
5797 default:
5798 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005799 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005800 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005801
5802 if (kind == RESET_KIND_SHUTDOWN)
5803 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005804}
5805
5806/* tp->lock is held. */
5807static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5808{
5809 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5810 switch (kind) {
5811 case RESET_KIND_INIT:
5812 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5813 DRV_STATE_START);
5814 break;
5815
5816 case RESET_KIND_SHUTDOWN:
5817 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5818 DRV_STATE_UNLOAD);
5819 break;
5820
5821 case RESET_KIND_SUSPEND:
5822 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5823 DRV_STATE_SUSPEND);
5824 break;
5825
5826 default:
5827 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 }
5830}
5831
Michael Chan7a6f4362006-09-27 16:03:31 -07005832static int tg3_poll_fw(struct tg3 *tp)
5833{
5834 int i;
5835 u32 val;
5836
Michael Chanb5d37722006-09-27 16:06:21 -07005837 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005838 /* Wait up to 20ms for init done. */
5839 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005840 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5841 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005842 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005843 }
5844 return -ENODEV;
5845 }
5846
Michael Chan7a6f4362006-09-27 16:03:31 -07005847 /* Wait for firmware initialization to complete. */
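	/* The bootcode acknowledges by writing back the one's complement of
	 * the magic value the driver posted before the reset.
	 */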
5848 for (i = 0; i < 100000; i++) {
5849 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5850 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5851 break;
5852 udelay(10);
5853 }
5854
5855 /* Chip might not be fitted with firmware. Some Sun onboard
5856 * parts are configured like that. So don't signal the timeout
5857 * of the above loop as an error, but do report the lack of
5858 * running firmware once.
5859 */
5860 if (i >= 100000 &&
5861 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5862 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5863
5864 printk(KERN_INFO PFX "%s: No firmware running.\n",
5865 tp->dev->name);
5866 }
5867
5868 return 0;
5869}
5870
Michael Chanee6a99b2007-07-18 21:49:10 -07005871/* Save PCI command register before chip reset */
5872static void tg3_save_pci_state(struct tg3 *tp)
5873{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005874 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005875}
5876
5877/* Restore PCI state after chip reset */
5878static void tg3_restore_pci_state(struct tg3 *tp)
5879{
5880 u32 val;
5881
5882 /* Re-enable indirect register accesses. */
5883 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5884 tp->misc_host_ctrl);
5885
5886 /* Set MAX PCI retry to zero. */
5887 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5888 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5889 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5890 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005891 /* Allow reads and writes to the APE register and memory space. */
5892 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5893 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5894 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005895 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5896
Matt Carlson8a6eac92007-10-21 16:17:55 -07005897 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005898
Matt Carlsonfcb389d2008-11-03 16:55:44 -08005899 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5900 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5901 pcie_set_readrq(tp->pdev, 4096);
5902 else {
5903 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5904 tp->pci_cacheline_sz);
5905 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5906 tp->pci_lat_timer);
5907 }
Michael Chan114342f2007-10-15 02:12:26 -07005908 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005909
Michael Chanee6a99b2007-07-18 21:49:10 -07005910 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson52f44902008-11-21 17:17:04 -08005911 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
Matt Carlson9974a352007-10-07 23:27:28 -07005912 u16 pcix_cmd;
5913
5914 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5915 &pcix_cmd);
5916 pcix_cmd &= ~PCI_X_CMD_ERO;
5917 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5918 pcix_cmd);
5919 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005920
5921 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005922
5923 /* Chip reset on 5780 will reset MSI enable bit,
5924 * so need to restore it.
5925 */
5926 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5927 u16 ctrl;
5928
5929 pci_read_config_word(tp->pdev,
5930 tp->msi_cap + PCI_MSI_FLAGS,
5931 &ctrl);
5932 pci_write_config_word(tp->pdev,
5933 tp->msi_cap + PCI_MSI_FLAGS,
5934 ctrl | PCI_MSI_FLAGS_ENABLE);
5935 val = tr32(MSGINT_MODE);
5936 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5937 }
5938 }
5939}
5940
Linus Torvalds1da177e2005-04-16 15:20:36 -07005941static void tg3_stop_fw(struct tg3 *);
5942
5943/* tp->lock is held. */
5944static int tg3_chip_reset(struct tg3 *tp)
5945{
5946 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005947 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005948 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949
David S. Millerf49639e2006-06-09 11:58:36 -07005950 tg3_nvram_lock(tp);
5951
Matt Carlson158d7ab2008-05-29 01:37:54 -07005952 tg3_mdio_stop(tp);
5953
Matt Carlson77b483f2008-08-15 14:07:24 -07005954 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5955
David S. Millerf49639e2006-06-09 11:58:36 -07005956 /* No matching tg3_nvram_unlock() after this because
5957 * chip reset below will undo the nvram lock.
5958 */
5959 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005960
Michael Chanee6a99b2007-07-18 21:49:10 -07005961 /* GRC_MISC_CFG core clock reset will clear the memory
5962 * enable bit in PCI register 4 and the MSI enable bit
5963 * on some chips, so we save relevant registers here.
5964 */
5965 tg3_save_pci_state(tp);
5966
Michael Chand9ab5ad2006-03-20 22:27:35 -08005967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005973 tw32(GRC_FASTBOOT_PC, 0);
5974
Linus Torvalds1da177e2005-04-16 15:20:36 -07005975 /*
5976 * We must avoid the readl() that normally takes place.
5977 * It locks machines, causes machine checks, and other
5978 * fun things. So, temporarily disable the 5701
5979 * hardware workaround, while we do the reset.
5980 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005981 write_op = tp->write32;
5982 if (write_op == tg3_write_flush_reg32)
5983 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005984
Michael Chand18edcb2007-03-24 20:57:11 -07005985 /* Prevent the irq handler from reading or writing PCI registers
5986 * during chip reset when the memory enable bit in the PCI command
5987 * register may be cleared. The chip does not generate interrupt
5988 * at this time, but the irq handler may still be called due to irq
5989 * sharing or irqpoll.
5990 */
5991 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005992 if (tp->hw_status) {
5993 tp->hw_status->status = 0;
5994 tp->hw_status->status_tag = 0;
5995 }
Michael Chand18edcb2007-03-24 20:57:11 -07005996 tp->last_tag = 0;
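	/* Make the RESETTING flag and the cleared status block visible, then
	 * wait out any interrupt handler still running on another CPU.
	 */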
5997 smp_mb();
5998 synchronize_irq(tp->pdev->irq);
5999
Linus Torvalds1da177e2005-04-16 15:20:36 -07006000 /* do the reset */
6001 val = GRC_MISC_CFG_CORECLK_RESET;
6002
6003 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6004 if (tr32(0x7e2c) == 0x60) {
6005 tw32(0x7e2c, 0x20);
6006 }
6007 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6008 tw32(GRC_MISC_CFG, (1 << 29));
6009 val |= (1 << 29);
6010 }
6011 }
6012
Michael Chanb5d37722006-09-27 16:06:21 -07006013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6014 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6015 tw32(GRC_VCPU_EXT_CTRL,
6016 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6017 }
6018
Linus Torvalds1da177e2005-04-16 15:20:36 -07006019 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6020 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6021 tw32(GRC_MISC_CFG, val);
6022
Michael Chan1ee582d2005-08-09 20:16:46 -07006023 /* restore 5701 hardware bug workaround write method */
6024 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006025
6026 /* Unfortunately, we have to delay before the PCI read back.
6027	 * Some 575X chips will not even respond to a PCI cfg access
6028 * when the reset command is given to the chip.
6029 *
6030 * How do these hardware designers expect things to work
6031 * properly if the PCI write is posted for a long period
6032 * of time? It is always necessary to have some method by
6033	 * which a register read back can occur to push out the
6034	 * write that performs the reset.
6035 *
6036 * For most tg3 variants the trick below was working.
6037 * Ho hum...
6038 */
6039 udelay(120);
6040
6041 /* Flush PCI posted writes. The normal MMIO registers
6042 * are inaccessible at this time so this is the only
6043	 * way to do this reliably (actually, this is no longer
6044 * the case, see above). I tried to use indirect
6045 * register read/write but this upset some 5701 variants.
6046 */
6047 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6048
6049 udelay(120);
6050
Matt Carlson5e7dfd02008-11-21 17:18:16 -08006051 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006052 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6053 int i;
6054 u32 cfg_val;
6055
6056 /* Wait for link training to complete. */
6057 for (i = 0; i < 5000; i++)
6058 udelay(100);
6059
6060 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6061 pci_write_config_dword(tp->pdev, 0xc4,
6062 cfg_val | (1 << 15));
6063 }
Matt Carlson5e7dfd02008-11-21 17:18:16 -08006064
6065 /* Set PCIE max payload size to 128 bytes and
6066 * clear the "no snoop" and "relaxed ordering" bits.
6067 */
6068 pci_write_config_word(tp->pdev,
6069 tp->pcie_cap + PCI_EXP_DEVCTL,
6070 0);
6071
6072 pcie_set_readrq(tp->pdev, 4096);
6073
6074 /* Clear error status */
6075 pci_write_config_word(tp->pdev,
6076 tp->pcie_cap + PCI_EXP_DEVSTA,
6077 PCI_EXP_DEVSTA_CED |
6078 PCI_EXP_DEVSTA_NFED |
6079 PCI_EXP_DEVSTA_FED |
6080 PCI_EXP_DEVSTA_URD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006081 }
6082
Michael Chanee6a99b2007-07-18 21:49:10 -07006083 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006084
Michael Chand18edcb2007-03-24 20:57:11 -07006085 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6086
Michael Chanee6a99b2007-07-18 21:49:10 -07006087 val = 0;
6088 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07006089 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07006090 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006091
6092 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6093 tg3_stop_fw(tp);
6094 tw32(0x5000, 0x400);
6095 }
6096
6097 tw32(GRC_MODE, tp->grc_mode);
6098
6099 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006100 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006101
6102 tw32(0xc4, val | (1 << 15));
6103 }
6104
6105 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6107 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6108 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6109 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6110 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6111 }
6112
6113 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6114 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6115 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07006116 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6117 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6118 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07006119 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6120 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6121 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6122 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6123 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006124 } else
6125 tw32_f(MAC_MODE, 0);
6126 udelay(40);
6127
Matt Carlson158d7ab2008-05-29 01:37:54 -07006128 tg3_mdio_start(tp);
6129
Matt Carlson77b483f2008-08-15 14:07:24 -07006130 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6131
Michael Chan7a6f4362006-09-27 16:03:31 -07006132 err = tg3_poll_fw(tp);
6133 if (err)
6134 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135
6136 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6137 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006138 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006139
6140 tw32(0x7c00, val | (1 << 25));
6141 }
6142
6143 /* Reprobe ASF enable state. */
6144 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6145 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6146 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6147 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6148 u32 nic_cfg;
6149
6150 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6151 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6152 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006153 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006154 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006155 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6156 }
6157 }
6158
6159 return 0;
6160}
6161
6162/* tp->lock is held. */
6163static void tg3_stop_fw(struct tg3 *tp)
6164{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006165 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6166 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006167 /* Wait for RX cpu to ACK the previous event. */
6168 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006169
6170 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006171
6172 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006173
Matt Carlson7c5026a2008-05-02 16:49:29 -07006174 /* Wait for RX cpu to ACK this event. */
6175 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006176 }
6177}
6178
6179/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006180static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006181{
6182 int err;
6183
6184 tg3_stop_fw(tp);
6185
Michael Chan944d9802005-05-29 14:57:48 -07006186 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006187
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006188 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006189 err = tg3_chip_reset(tp);
6190
Michael Chan944d9802005-05-29 14:57:48 -07006191 tg3_write_sig_legacy(tp, kind);
6192 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006193
6194 if (err)
6195 return err;
6196
6197 return 0;
6198}
6199
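/* Layout of the 5701_A0 RX-CPU firmware image loaded by
 * tg3_load_5701_a0_firmware_fix() below: base address and length of
 * each section (text, rodata, data, sbss, bss) in the CPU's address
 * space.  The instruction and rodata words themselves follow in
 * tg3FwText[] and tg3FwRodata[].
 */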
6200#define TG3_FW_RELEASE_MAJOR 0x0
 6201 #define TG3_FW_RELEASE_MINOR 0x0
6202#define TG3_FW_RELEASE_FIX 0x0
6203#define TG3_FW_START_ADDR 0x08000000
6204#define TG3_FW_TEXT_ADDR 0x08000000
6205#define TG3_FW_TEXT_LEN 0x9c0
6206#define TG3_FW_RODATA_ADDR 0x080009c0
6207#define TG3_FW_RODATA_LEN 0x60
6208#define TG3_FW_DATA_ADDR 0x08000a40
6209#define TG3_FW_DATA_LEN 0x20
6210#define TG3_FW_SBSS_ADDR 0x08000a60
6211#define TG3_FW_SBSS_LEN 0xc
6212#define TG3_FW_BSS_ADDR 0x08000a70
6213#define TG3_FW_BSS_LEN 0x10
6214
Andreas Mohr50da8592006-08-14 23:54:30 -07006215static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006216 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6217 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6218 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6219 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6220 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6221 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6222 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6223 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6224 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6225 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6226 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6227 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6228 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6229 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6230 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6231 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6232 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6233 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6234 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6235 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6236 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6237 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6238 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6239 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6240 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6241 0, 0, 0, 0, 0, 0,
6242 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6243 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6244 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6245 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6246 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6247 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6248 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6249 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6250 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6251 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6252 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6253 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6254 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6255 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6256 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6257 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6258 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6259 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6260 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6261 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6262 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6263 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6264 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6265 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6266 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6267 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6268 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6269 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6270 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6271 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6272 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6273 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6274 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6275 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6276 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6277 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6278 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6279 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6280 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6281 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6282 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6283 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6284 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6285 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6286 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6287 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6288 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6289 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6290 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6291 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6292 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6293 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6294 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6295 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6296 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6297 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6298 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6299 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6300 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6301 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6302 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6303 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6304 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6305 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6306 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6307};
6308
Andreas Mohr50da8592006-08-14 23:54:30 -07006309static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006310 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6311 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6312 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6313 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6314 0x00000000
6315};
6316
6317#if 0 /* All zeros, don't eat up space with it. */
6318u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6319 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6320 0x00000000, 0x00000000, 0x00000000, 0x00000000
6321};
6322#endif
6323
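/* On-chip scratch RAM windows into which tg3_load_firmware_cpu()
 * copies the firmware sections before starting the RX or TX CPU.
 */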
6324#define RX_CPU_SCRATCH_BASE 0x30000
6325#define RX_CPU_SCRATCH_SIZE 0x04000
6326#define TX_CPU_SCRATCH_BASE 0x34000
6327#define TX_CPU_SCRATCH_SIZE 0x04000
6328
6329/* tp->lock is held. */
6330static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6331{
6332 int i;
6333
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006334 BUG_ON(offset == TX_CPU_BASE &&
6335 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006336
Michael Chanb5d37722006-09-27 16:06:21 -07006337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6338 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6339
6340 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6341 return 0;
6342 }
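	/* Request a halt and poll until the CPU acknowledges it. */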
Linus Torvalds1da177e2005-04-16 15:20:36 -07006343 if (offset == RX_CPU_BASE) {
6344 for (i = 0; i < 10000; i++) {
6345 tw32(offset + CPU_STATE, 0xffffffff);
6346 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6347 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6348 break;
6349 }
6350
6351 tw32(offset + CPU_STATE, 0xffffffff);
6352 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6353 udelay(10);
6354 } else {
6355 for (i = 0; i < 10000; i++) {
6356 tw32(offset + CPU_STATE, 0xffffffff);
6357 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6358 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6359 break;
6360 }
6361 }
6362
6363 if (i >= 10000) {
 6364 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
 6365 "%s CPU\n",
6366 tp->dev->name,
6367 (offset == RX_CPU_BASE ? "RX" : "TX"));
6368 return -ENODEV;
6369 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006370
6371 /* Clear firmware's nvram arbitration. */
6372 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6373 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006374 return 0;
6375}
6376
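/* Describes one firmware image: base address and length of each
 * section plus a pointer to its contents; a NULL data pointer means
 * the section is zero-filled when loaded.
 */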
6377struct fw_info {
6378 unsigned int text_base;
6379 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006380 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006381 unsigned int rodata_base;
6382 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006383 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006384 unsigned int data_base;
6385 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006386 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006387};
6388
6389/* tp->lock is held. */
6390static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6391 int cpu_scratch_size, struct fw_info *info)
6392{
Michael Chanec41c7d2006-01-17 02:40:55 -08006393 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006394 void (*write_op)(struct tg3 *, u32, u32);
6395
6396 if (cpu_base == TX_CPU_BASE &&
6397 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6398 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
 6399 "TX cpu firmware on %s, which is a 5705-class chip.\n",
6400 tp->dev->name);
6401 return -EINVAL;
6402 }
6403
6404 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6405 write_op = tg3_write_mem;
6406 else
6407 write_op = tg3_write_indirect_reg32;
6408
Michael Chan1b628152005-05-29 14:59:49 -07006409 /* It is possible that bootcode is still loading at this point.
 6410 * Get the nvram lock before halting the cpu.
6411 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006412 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006413 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006414 if (!lock_err)
6415 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006416 if (err)
6417 goto out;
6418
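	/* Zero the scratch area, keep the CPU halted, then copy the
	 * text, rodata and data sections into scratch memory.
	 */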
6419 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6420 write_op(tp, cpu_scratch_base + i, 0);
6421 tw32(cpu_base + CPU_STATE, 0xffffffff);
6422 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6423 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6424 write_op(tp, (cpu_scratch_base +
6425 (info->text_base & 0xffff) +
6426 (i * sizeof(u32))),
6427 (info->text_data ?
6428 info->text_data[i] : 0));
6429 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6430 write_op(tp, (cpu_scratch_base +
6431 (info->rodata_base & 0xffff) +
6432 (i * sizeof(u32))),
6433 (info->rodata_data ?
6434 info->rodata_data[i] : 0));
6435 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6436 write_op(tp, (cpu_scratch_base +
6437 (info->data_base & 0xffff) +
6438 (i * sizeof(u32))),
6439 (info->data_data ?
6440 info->data_data[i] : 0));
6441
6442 err = 0;
6443
6444out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006445 return err;
6446}
6447
6448/* tp->lock is held. */
6449static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6450{
6451 struct fw_info info;
6452 int err, i;
6453
6454 info.text_base = TG3_FW_TEXT_ADDR;
6455 info.text_len = TG3_FW_TEXT_LEN;
6456 info.text_data = &tg3FwText[0];
6457 info.rodata_base = TG3_FW_RODATA_ADDR;
6458 info.rodata_len = TG3_FW_RODATA_LEN;
6459 info.rodata_data = &tg3FwRodata[0];
6460 info.data_base = TG3_FW_DATA_ADDR;
6461 info.data_len = TG3_FW_DATA_LEN;
6462 info.data_data = NULL;
6463
6464 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6465 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6466 &info);
6467 if (err)
6468 return err;
6469
6470 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6471 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6472 &info);
6473 if (err)
6474 return err;
6475
 6476 /* Now start up only the RX cpu. */
6477 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6478 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6479
6480 for (i = 0; i < 5; i++) {
6481 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6482 break;
6483 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6484 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6485 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6486 udelay(1000);
6487 }
6488 if (i >= 5) {
 6489 printk(KERN_ERR PFX "tg3_load_firmware failed to set "
 6490 "RX CPU PC for %s, is %08x, should be %08x\n",
6491 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6492 TG3_FW_TEXT_ADDR);
6493 return -ENODEV;
6494 }
6495 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6496 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6497
6498 return 0;
6499}
6500
Linus Torvalds1da177e2005-04-16 15:20:36 -07006501
6502#define TG3_TSO_FW_RELEASE_MAJOR 0x1
 6503 #define TG3_TSO_FW_RELEASE_MINOR 0x6
6504#define TG3_TSO_FW_RELEASE_FIX 0x0
6505#define TG3_TSO_FW_START_ADDR 0x08000000
6506#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6507#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6508#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6509#define TG3_TSO_FW_RODATA_LEN 0x60
6510#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6511#define TG3_TSO_FW_DATA_LEN 0x30
6512#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6513#define TG3_TSO_FW_SBSS_LEN 0x2c
6514#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6515#define TG3_TSO_FW_BSS_LEN 0x894
6516
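/* TSO firmware for chips that lack hardware TSO; tg3_load_tso_firmware()
 * below loads it into the TX CPU.  5705-class parts instead use the
 * special image further down, which runs on the RX CPU.
 */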
Andreas Mohr50da8592006-08-14 23:54:30 -07006517static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006518 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6519 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6520 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6521 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6522 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6523 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6524 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6525 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6526 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6527 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6528 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6529 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6530 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6531 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6532 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6533 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6534 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6535 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6536 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6537 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6538 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6539 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6540 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6541 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6542 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6543 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6544 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6545 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6546 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6547 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6548 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6549 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6550 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6551 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6552 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6553 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6554 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6555 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6556 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6557 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6558 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6559 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6560 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6561 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6562 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6563 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6564 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6565 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6566 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6567 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6568 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6569 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6570 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6571 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6572 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6573 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6574 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6575 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6576 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6577 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6578 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6579 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6580 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6581 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6582 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6583 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6584 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6585 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6586 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6587 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6588 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6589 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6590 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6591 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6592 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6593 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6594 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6595 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6596 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6597 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6598 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6599 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6600 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6601 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6602 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6603 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6604 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6605 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6606 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6607 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6608 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6609 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6610 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6611 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6612 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6613 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6614 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6615 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6616 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6617 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6618 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6619 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6620 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6621 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6622 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6623 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6624 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6625 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6626 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6627 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6628 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6629 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6630 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6631 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6632 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6633 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6634 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6635 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6636 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6637 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6638 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6639 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6640 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6641 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6642 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6643 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6644 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6645 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6646 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6647 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6648 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6649 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6650 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6651 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6652 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6653 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6654 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6655 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6656 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6657 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6658 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6659 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6660 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6661 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6662 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6663 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6664 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6665 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6666 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6667 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6668 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6669 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6670 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6671 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6672 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6673 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6674 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6675 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6676 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6677 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6678 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6679 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6680 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6681 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6682 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6683 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6684 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6685 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6686 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6687 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6688 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6689 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6690 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6691 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6692 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6693 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6694 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6695 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6696 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6697 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6698 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6699 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6700 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6701 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6702 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6703 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6704 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6705 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6706 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6707 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6708 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6709 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6710 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6711 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6712 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6713 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6714 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6715 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6716 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6717 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6718 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6719 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6720 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6721 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6722 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6723 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6724 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6725 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6726 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6727 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6728 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6729 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6730 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6731 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6732 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6733 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6734 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6735 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6736 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6737 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6738 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6739 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6740 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6741 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6742 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6743 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6744 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6745 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6746 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6747 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6748 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6749 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6750 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6751 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6752 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6753 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6754 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6755 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6756 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6757 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6758 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6759 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6760 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6761 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6762 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6763 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6764 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6765 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6766 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6767 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6768 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6769 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6770 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6771 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6772 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6773 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6774 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6775 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6776 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6777 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6778 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6779 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6780 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6781 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6782 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6783 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6784 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6785 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6786 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6787 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6788 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6789 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6790 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6791 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6792 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6793 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6794 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6795 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6796 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6797 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6798 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6799 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6800 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6801 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6802};
6803
Andreas Mohr50da8592006-08-14 23:54:30 -07006804static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006805 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6806 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6807 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6808 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6809 0x00000000,
6810};
6811
Andreas Mohr50da8592006-08-14 23:54:30 -07006812static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006813 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6814 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6815 0x00000000,
6816};
6817
6818/* 5705 needs a special version of the TSO firmware. */
6819#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
 6820 #define TG3_TSO5_FW_RELEASE_MINOR 0x2
6821#define TG3_TSO5_FW_RELEASE_FIX 0x0
6822#define TG3_TSO5_FW_START_ADDR 0x00010000
6823#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6824#define TG3_TSO5_FW_TEXT_LEN 0xe90
6825#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6826#define TG3_TSO5_FW_RODATA_LEN 0x50
6827#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6828#define TG3_TSO5_FW_DATA_LEN 0x20
6829#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6830#define TG3_TSO5_FW_SBSS_LEN 0x28
6831#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6832#define TG3_TSO5_FW_BSS_LEN 0x88
6833
Andreas Mohr50da8592006-08-14 23:54:30 -07006834static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006835 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6836 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6837 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6838 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6839 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6840 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6841 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6842 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6843 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6844 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6845 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6846 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6847 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6848 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6849 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6850 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6851 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6852 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6853 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6854 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6855 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6856 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6857 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6858 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6859 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6860 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6861 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6862 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6863 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6864 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6865 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6866 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6867 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6868 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6869 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6870 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6871 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6872 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6873 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6874 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6875 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6876 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6877 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6878 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6879 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6880 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6881 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6882 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6883 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6884 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6885 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6886 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6887 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6888 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6889 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6890 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6891 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6892 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6893 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6894 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6895 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6896 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6897 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6898 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6899 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6900 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6901 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6902 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6903 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6904 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6905 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6906 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6907 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6908 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6909 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6910 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6911 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6912 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6913 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6914 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6915 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6916 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6917 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6918 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6919 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6920 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6921 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6922 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6923 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6924 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6925 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6926 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6927 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6928 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6929 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6930 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6931 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6932 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6933 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6934 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6935 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6936 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6937 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6938 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6939 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6940 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6941 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6942 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6943 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6944 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6945 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6946 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6947 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6948 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6949 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6950 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6951 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6952 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6953 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6954 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6955 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6956 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6957 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6958 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6959 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6960 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6961 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6962 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6963 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6964 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6965 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6966 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6967 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6968 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6969 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6970 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6971 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6972 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6973 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6974 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6975 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6976 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6977 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6978 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6979 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6980 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6981 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6982 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6983 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6984 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6985 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6986 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6987 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6988 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6989 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6990 0x00000000, 0x00000000, 0x00000000,
6991};
6992
Andreas Mohr50da8592006-08-14 23:54:30 -07006993static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6995 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6996 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6997 0x00000000, 0x00000000, 0x00000000,
6998};
6999
Andreas Mohr50da8592006-08-14 23:54:30 -07007000static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007001 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
7002 0x00000000, 0x00000000, 0x00000000,
7003};
7004
7005/* tp->lock is held. */
7006static int tg3_load_tso_firmware(struct tg3 *tp)
7007{
7008 struct fw_info info;
7009 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7010 int err, i;
7011
7012 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7013 return 0;
7014
7015 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7016 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
7017 info.text_len = TG3_TSO5_FW_TEXT_LEN;
7018 info.text_data = &tg3Tso5FwText[0];
7019 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
7020 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
7021 info.rodata_data = &tg3Tso5FwRodata[0];
7022 info.data_base = TG3_TSO5_FW_DATA_ADDR;
7023 info.data_len = TG3_TSO5_FW_DATA_LEN;
7024 info.data_data = &tg3Tso5FwData[0];
7025 cpu_base = RX_CPU_BASE;
7026 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7027 cpu_scratch_size = (info.text_len +
7028 info.rodata_len +
7029 info.data_len +
7030 TG3_TSO5_FW_SBSS_LEN +
7031 TG3_TSO5_FW_BSS_LEN);
7032 } else {
7033 info.text_base = TG3_TSO_FW_TEXT_ADDR;
7034 info.text_len = TG3_TSO_FW_TEXT_LEN;
7035 info.text_data = &tg3TsoFwText[0];
7036 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
7037 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
7038 info.rodata_data = &tg3TsoFwRodata[0];
7039 info.data_base = TG3_TSO_FW_DATA_ADDR;
7040 info.data_len = TG3_TSO_FW_DATA_LEN;
7041 info.data_data = &tg3TsoFwData[0];
7042 cpu_base = TX_CPU_BASE;
7043 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7044 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7045 }
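	/* Note on the split above: the 5705 has no separate TX CPU, so its
	 * TSO5 image is loaded through the RX CPU and its scratch space is
	 * carved out of the mbuf-pool SRAM; the mbuf pool setup later in
	 * tg3_reset_hw() reserves the same fw_len region.  Other TSO-capable
	 * chips use the dedicated TX CPU scratch area.
	 */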
7046
7047 err = tg3_load_firmware_cpu(tp, cpu_base,
7048 cpu_scratch_base, cpu_scratch_size,
7049 &info);
7050 if (err)
7051 return err;
7052
7053	/* Now start up the CPU. */
7054 tw32(cpu_base + CPU_STATE, 0xffffffff);
7055 tw32_f(cpu_base + CPU_PC, info.text_base);
7056
7057 for (i = 0; i < 5; i++) {
7058 if (tr32(cpu_base + CPU_PC) == info.text_base)
7059 break;
7060 tw32(cpu_base + CPU_STATE, 0xffffffff);
7061 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7062 tw32_f(cpu_base + CPU_PC, info.text_base);
7063 udelay(1000);
7064 }
7065 if (i >= 5) {
7066 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7067		       "to set CPU PC: is %08x, should be %08x\n",
7068 tp->dev->name, tr32(cpu_base + CPU_PC),
7069 info.text_base);
7070 return -ENODEV;
7071 }
7072 tw32(cpu_base + CPU_STATE, 0xffffffff);
7073 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7074 return 0;
7075}
7076
Linus Torvalds1da177e2005-04-16 15:20:36 -07007077
Linus Torvalds1da177e2005-04-16 15:20:36 -07007078static int tg3_set_mac_addr(struct net_device *dev, void *p)
7079{
7080 struct tg3 *tp = netdev_priv(dev);
7081 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07007082 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007083
Michael Chanf9804dd2005-09-27 12:13:10 -07007084 if (!is_valid_ether_addr(addr->sa_data))
7085 return -EINVAL;
7086
Linus Torvalds1da177e2005-04-16 15:20:36 -07007087 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7088
Michael Chane75f7c92006-03-20 21:33:26 -08007089 if (!netif_running(dev))
7090 return 0;
7091
Michael Chan58712ef2006-04-29 18:58:01 -07007092 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07007093 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07007094
Michael Chan986e0ae2007-05-05 12:10:20 -07007095 addr0_high = tr32(MAC_ADDR_0_HIGH);
7096 addr0_low = tr32(MAC_ADDR_0_LOW);
7097 addr1_high = tr32(MAC_ADDR_1_HIGH);
7098 addr1_low = tr32(MAC_ADDR_1_LOW);
7099
7100 /* Skip MAC addr 1 if ASF is using it. */
7101 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7102 !(addr1_high == 0 && addr1_low == 0))
7103 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07007104 }
Michael Chan986e0ae2007-05-05 12:10:20 -07007105 spin_lock_bh(&tp->lock);
7106 __tg3_set_mac_addr(tp, skip_mac_1);
7107 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007108
Michael Chanb9ec6c12006-07-25 16:37:27 -07007109 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007110}
7111
7112/* tp->lock is held. */
7113static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7114 dma_addr_t mapping, u32 maxlen_flags,
7115 u32 nic_addr)
7116{
7117 tg3_write_mem(tp,
7118 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7119 ((u64) mapping >> 32));
7120 tg3_write_mem(tp,
7121 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7122 ((u64) mapping & 0xffffffff));
7123 tg3_write_mem(tp,
7124 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7125 maxlen_flags);
7126
7127 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7128 tg3_write_mem(tp,
7129 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7130 nic_addr);
7131}
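/* For reference: a TG3_BDINFO record in NIC SRAM is a small fixed-size
 * block holding the 64-bit host DMA address of a ring (high word first),
 * a maxlen/flags word, and the NIC-internal address of the descriptors.
 * tg3_set_bdinfo() above fills one such record; the exact offsets come
 * from the TG3_BDINFO_* constants in tg3.h.
 */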
7132
7133static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07007134static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07007135{
7136 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7137 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7138 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7139 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7140 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7141 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7142 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7143 }
7144 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7145 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7146 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7147 u32 val = ec->stats_block_coalesce_usecs;
7148
7149 if (!netif_carrier_ok(tp->dev))
7150 val = 0;
7151
7152 tw32(HOSTCC_STAT_COAL_TICKS, val);
7153 }
7154}
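#if 0
/* Illustrative sketch only (never compiled or called): roughly how the
 * coalescing parameters reach __tg3_set_coalesce().  In the driver proper,
 * tp->coal is filled from ethtool and applied by tg3_reset_hw() with
 * tp->lock held; the numeric values below are made-up examples.
 */
static void tg3_coalesce_example(struct tg3 *tp)
{
	struct ethtool_coalesce ec = { 0 };

	ec.rx_coalesce_usecs = 20;		/* wait up to 20 usecs for rx */
	ec.rx_max_coalesced_frames = 5;		/* ...or 5 frames, whichever first */
	ec.tx_coalesce_usecs = 72;
	ec.tx_max_coalesced_frames = 53;
	ec.stats_block_coalesce_usecs = 5000;	/* stats update every 5 ms */

	spin_lock_bh(&tp->lock);
	__tg3_set_coalesce(tp, &ec);
	spin_unlock_bh(&tp->lock);
}
#endif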
Linus Torvalds1da177e2005-04-16 15:20:36 -07007155
7156/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007157static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007158{
7159 u32 val, rdmac_mode;
7160 int i, err, limit;
7161
7162 tg3_disable_ints(tp);
7163
7164 tg3_stop_fw(tp);
7165
7166 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7167
7168 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007169 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170 }
7171
Matt Carlsondd477002008-05-25 23:45:58 -07007172 if (reset_phy &&
7173 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007174 tg3_phy_reset(tp);
7175
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176 err = tg3_chip_reset(tp);
7177 if (err)
7178 return err;
7179
7180 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7181
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007182 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007183 val = tr32(TG3_CPMU_CTRL);
7184 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7185 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007186
7187 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7188 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7189 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7190 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7191
7192 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7193 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7194 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7195 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7196
7197 val = tr32(TG3_CPMU_HST_ACC);
7198 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7199 val |= CPMU_HST_ACC_MACCLK_6_25;
7200 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007201 }
7202
Linus Torvalds1da177e2005-04-16 15:20:36 -07007203 /* This works around an issue with Athlon chipsets on
7204 * B3 tigon3 silicon. This bit has no effect on any
7205 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007206 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007207 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007208 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7209 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7210 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7211 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7212 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007213
7214 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7215 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7216 val = tr32(TG3PCI_PCISTATE);
7217 val |= PCISTATE_RETRY_SAME_DMA;
7218 tw32(TG3PCI_PCISTATE, val);
7219 }
7220
Matt Carlson0d3031d2007-10-10 18:02:43 -07007221 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7222 /* Allow reads and writes to the
7223 * APE register and memory space.
7224 */
7225 val = tr32(TG3PCI_PCISTATE);
7226 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7227 PCISTATE_ALLOW_APE_SHMEM_WR;
7228 tw32(TG3PCI_PCISTATE, val);
7229 }
7230
Linus Torvalds1da177e2005-04-16 15:20:36 -07007231 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7232 /* Enable some hw fixes. */
7233 val = tr32(TG3PCI_MSI_DATA);
7234 val |= (1 << 26) | (1 << 28) | (1 << 29);
7235 tw32(TG3PCI_MSI_DATA, val);
7236 }
7237
7238 /* Descriptor ring init may make accesses to the
7239	 * NIC SRAM area to set up the TX descriptors, so we
7240 * can only do this after the hardware has been
7241 * successfully reset.
7242 */
Michael Chan32d8c572006-07-25 16:38:29 -07007243 err = tg3_init_rings(tp);
7244 if (err)
7245 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007246
Matt Carlson9936bcf2007-10-10 18:03:07 -07007247 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlsonfcb389d2008-11-03 16:55:44 -08007248 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007249 /* This value is determined during the probe time DMA
7250 * engine test, tg3_test_dma.
7251 */
7252 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254
7255 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7256 GRC_MODE_4X_NIC_SEND_RINGS |
7257 GRC_MODE_NO_TX_PHDR_CSUM |
7258 GRC_MODE_NO_RX_PHDR_CSUM);
7259 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007260
7261 /* Pseudo-header checksum is done by hardware logic and not
7262	 * the offload processors, so make the chip do the pseudo-
7263 * header checksums on receive. For transmit it is more
7264 * convenient to do the pseudo-header checksum in software
7265 * as Linux does that on transmit for us in all cases.
7266 */
7267 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007268
7269 tw32(GRC_MODE,
7270 tp->grc_mode |
7271 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7272
7273	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
7274 val = tr32(GRC_MISC_CFG);
7275 val &= ~0xff;
7276 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7277 tw32(GRC_MISC_CFG, val);
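	/* A prescaler value of 65 divides the 66 MHz core clock by (65 + 1),
	 * which should yield a 1 MHz timer tick (an assumption based on the
	 * comment above, not verified against the hardware manual).
	 */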
7278
7279 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007280 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007281 /* Do nothing. */
7282 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7283 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7285 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7286 else
7287 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7288 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7289 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007291 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7292 int fw_len;
7293
7294 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7295 TG3_TSO5_FW_RODATA_LEN +
7296 TG3_TSO5_FW_DATA_LEN +
7297 TG3_TSO5_FW_SBSS_LEN +
7298 TG3_TSO5_FW_BSS_LEN);
7299 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7300 tw32(BUFMGR_MB_POOL_ADDR,
7301 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7302 tw32(BUFMGR_MB_POOL_SIZE,
7303 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7304 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007305
Michael Chan0f893dc2005-07-25 12:30:38 -07007306 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007307 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7308 tp->bufmgr_config.mbuf_read_dma_low_water);
7309 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7310 tp->bufmgr_config.mbuf_mac_rx_low_water);
7311 tw32(BUFMGR_MB_HIGH_WATER,
7312 tp->bufmgr_config.mbuf_high_water);
7313 } else {
7314 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7315 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7316 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7317 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7318 tw32(BUFMGR_MB_HIGH_WATER,
7319 tp->bufmgr_config.mbuf_high_water_jumbo);
7320 }
7321 tw32(BUFMGR_DMA_LOW_WATER,
7322 tp->bufmgr_config.dma_low_water);
7323 tw32(BUFMGR_DMA_HIGH_WATER,
7324 tp->bufmgr_config.dma_high_water);
7325
7326 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7327 for (i = 0; i < 2000; i++) {
7328 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7329 break;
7330 udelay(10);
7331 }
7332 if (i >= 2000) {
7333 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7334 tp->dev->name);
7335 return -ENODEV;
7336 }
7337
7338 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007339 val = tp->rx_pending / 8;
7340 if (val == 0)
7341 val = 1;
7342 else if (val > tp->rx_std_max_post)
7343 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007344 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7345 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7346 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7347
7348 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7349 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7350 }
Michael Chanf92905d2006-06-29 20:14:29 -07007351
7352 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007353
7354	/* Initialize TG3_BDINFOs at:
7355 * RCVDBDI_STD_BD: standard eth size rx ring
7356 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7357 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7358 *
7359 * like so:
7360 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7361 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7362 * ring attribute flags
7363 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7364 *
7365 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7366 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7367 *
7368 * The size of each ring is fixed in the firmware, but the location is
7369 * configurable.
7370 */
7371 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7372 ((u64) tp->rx_std_mapping >> 32));
7373 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7374 ((u64) tp->rx_std_mapping & 0xffffffff));
7375 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7376 NIC_SRAM_RX_BUFFER_DESC);
7377
7378 /* Don't even try to program the JUMBO/MINI buffer descriptor
7379 * configs on 5705.
7380 */
7381 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7382 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7383 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7384 } else {
7385 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7386 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7387
7388 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7389 BDINFO_FLAGS_DISABLED);
7390
7391 /* Setup replenish threshold. */
7392 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7393
Michael Chan0f893dc2005-07-25 12:30:38 -07007394 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007395 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7396 ((u64) tp->rx_jumbo_mapping >> 32));
7397 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7398 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7399 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7400 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7401 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7402 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7403 } else {
7404 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7405 BDINFO_FLAGS_DISABLED);
7406 }
7407
7408 }
7409
7410 /* There is only one send ring on 5705/5750, no need to explicitly
7411 * disable the others.
7412 */
7413 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7414 /* Clear out send RCB ring in SRAM. */
7415 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7416 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7417 BDINFO_FLAGS_DISABLED);
7418 }
7419
7420 tp->tx_prod = 0;
7421 tp->tx_cons = 0;
7422 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7423 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7424
7425 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7426 tp->tx_desc_mapping,
7427 (TG3_TX_RING_SIZE <<
7428 BDINFO_FLAGS_MAXLEN_SHIFT),
7429 NIC_SRAM_TX_BUFFER_DESC);
7430
7431 /* There is only one receive return ring on 5705/5750, no need
7432 * to explicitly disable the others.
7433 */
7434 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7435 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7436 i += TG3_BDINFO_SIZE) {
7437 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7438 BDINFO_FLAGS_DISABLED);
7439 }
7440 }
7441
7442 tp->rx_rcb_ptr = 0;
7443 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7444
7445 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7446 tp->rx_rcb_mapping,
7447 (TG3_RX_RCB_RING_SIZE(tp) <<
7448 BDINFO_FLAGS_MAXLEN_SHIFT),
7449 0);
7450
7451 tp->rx_std_ptr = tp->rx_pending;
7452 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7453 tp->rx_std_ptr);
7454
Michael Chan0f893dc2005-07-25 12:30:38 -07007455 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007456 tp->rx_jumbo_pending : 0;
7457 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7458 tp->rx_jumbo_ptr);
7459
7460 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007461 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462
7463 /* MTU + ethernet header + FCS + optional VLAN tag */
7464 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
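	/* ETH_HLEN covers the 14-byte Ethernet header; the extra 8 bytes
	 * are 4 for the FCS plus 4 for an optional 802.1Q VLAN tag.
	 */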
7465
7466 /* The slot time is changed by tg3_setup_phy if we
7467 * run at gigabit with half duplex.
7468 */
7469 tw32(MAC_TX_LENGTHS,
7470 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7471 (6 << TX_LENGTHS_IPG_SHIFT) |
7472 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7473
7474 /* Receive rules. */
7475 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7476 tw32(RCVLPC_CONFIG, 0x0181);
7477
7478 /* Calculate RDMAC_MODE setting early, we need it to determine
7479 * the RCVLPC_STATE_ENABLE mask.
7480 */
7481 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7482 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7483 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7484 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7485 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007486
Matt Carlson57e69832008-05-25 23:48:31 -07007487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007489 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7490 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7491 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7492
Michael Chan85e94ce2005-04-21 17:05:28 -07007493 /* If statement applies to 5705 and 5750 PCI devices only */
7494 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7495 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7496 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007497 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007499 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7500 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7501 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7502 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7503 }
7504 }
7505
Michael Chan85e94ce2005-04-21 17:05:28 -07007506 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7507 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7508
Linus Torvalds1da177e2005-04-16 15:20:36 -07007509 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7510 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007511
7512 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007513 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7514 val = tr32(RCVLPC_STATS_ENABLE);
7515 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7516 tw32(RCVLPC_STATS_ENABLE, val);
7517 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7518 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519 val = tr32(RCVLPC_STATS_ENABLE);
7520 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7521 tw32(RCVLPC_STATS_ENABLE, val);
7522 } else {
7523 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7524 }
7525 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7526 tw32(SNDDATAI_STATSENAB, 0xffffff);
7527 tw32(SNDDATAI_STATSCTRL,
7528 (SNDDATAI_SCTRL_ENABLE |
7529 SNDDATAI_SCTRL_FASTUPD));
7530
7531 /* Setup host coalescing engine. */
7532 tw32(HOSTCC_MODE, 0);
7533 for (i = 0; i < 2000; i++) {
7534 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7535 break;
7536 udelay(10);
7537 }
7538
Michael Chand244c892005-07-05 14:42:33 -07007539 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007540
7541 /* set status block DMA address */
7542 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7543 ((u64) tp->status_mapping >> 32));
7544 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7545 ((u64) tp->status_mapping & 0xffffffff));
7546
7547 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7548 /* Status/statistics block address. See tg3_timer,
7549 * the tg3_periodic_fetch_stats call there, and
7550 * tg3_get_stats to see how this works for 5705/5750 chips.
7551 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007552 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7553 ((u64) tp->stats_mapping >> 32));
7554 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7555 ((u64) tp->stats_mapping & 0xffffffff));
7556 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7557 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7558 }
7559
7560 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7561
7562 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7563 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7564 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7565 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7566
7567 /* Clear statistics/status block in chip, and status block in ram. */
7568 for (i = NIC_SRAM_STATS_BLK;
7569 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7570 i += sizeof(u32)) {
7571 tg3_write_mem(tp, i, 0);
7572 udelay(40);
7573 }
7574 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7575
Michael Chanc94e3942005-09-27 12:12:42 -07007576 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7577 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7578 /* reset to prevent losing 1st rx packet intermittently */
7579 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7580 udelay(10);
7581 }
7582
Matt Carlson3bda1252008-08-15 14:08:22 -07007583 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7584 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7585 else
7586 tp->mac_mode = 0;
7587 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007588 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007589 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7590 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7591 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7592 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7594 udelay(40);
7595
Michael Chan314fba32005-04-21 17:07:04 -07007596 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007597 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007598 * register to preserve the GPIO settings for LOMs. The GPIOs,
7599 * whether used as inputs or outputs, are set by boot code after
7600 * reset.
7601 */
Michael Chan9d26e212006-12-07 00:21:14 -08007602 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007603 u32 gpio_mask;
7604
Michael Chan9d26e212006-12-07 00:21:14 -08007605 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7606 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7607 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007608
7609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7610 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7611 GRC_LCLCTRL_GPIO_OUTPUT3;
7612
Michael Chanaf36e6b2006-03-23 01:28:06 -08007613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7614 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7615
Gary Zambranoaaf84462007-05-05 11:51:45 -07007616 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007617 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7618
7619 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007620 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7621 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7622 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007623 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007624 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7625 udelay(100);
7626
Michael Chan09ee9292005-08-09 20:17:00 -07007627 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007628 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007629
7630 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7631 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7632 udelay(40);
7633 }
7634
7635 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7636 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7637 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7638 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7639 WDMAC_MODE_LNGREAD_ENAB);
7640
Michael Chan85e94ce2005-04-21 17:05:28 -07007641 /* If statement applies to 5705 and 5750 PCI devices only */
7642 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7643 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007645		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7646 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7647 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7648 /* nothing */
7649 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7650 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7651 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7652 val |= WDMAC_MODE_RX_ACCEL;
7653 }
7654 }
7655
Michael Chand9ab5ad2006-03-20 22:27:35 -08007656 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007657 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007658 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007659 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007660 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7661 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007662 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007663
Linus Torvalds1da177e2005-04-16 15:20:36 -07007664 tw32_f(WDMAC_MODE, val);
7665 udelay(40);
7666
Matt Carlson9974a352007-10-07 23:27:28 -07007667 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7668 u16 pcix_cmd;
7669
7670 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7671 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007672 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007673 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7674 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007675 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007676 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7677 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007678 }
Matt Carlson9974a352007-10-07 23:27:28 -07007679 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7680 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007681 }
7682
7683 tw32_f(RDMAC_MODE, rdmac_mode);
7684 udelay(40);
7685
7686 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7687 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7688 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007689
7690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7691 tw32(SNDDATAC_MODE,
7692 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7693 else
7694 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7695
Linus Torvalds1da177e2005-04-16 15:20:36 -07007696 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7697 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7698 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7699 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007700 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7701 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007702 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7703 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7704
7705 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7706 err = tg3_load_5701_a0_firmware_fix(tp);
7707 if (err)
7708 return err;
7709 }
7710
Linus Torvalds1da177e2005-04-16 15:20:36 -07007711 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7712 err = tg3_load_tso_firmware(tp);
7713 if (err)
7714 return err;
7715 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716
7717 tp->tx_mode = TX_MODE_ENABLE;
7718 tw32_f(MAC_TX_MODE, tp->tx_mode);
7719 udelay(100);
7720
7721 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007723 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7725 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007726 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7727
Linus Torvalds1da177e2005-04-16 15:20:36 -07007728 tw32_f(MAC_RX_MODE, tp->rx_mode);
7729 udelay(10);
7730
Linus Torvalds1da177e2005-04-16 15:20:36 -07007731 tw32(MAC_LED_CTRL, tp->led_ctrl);
7732
7733 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007734 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007735 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7736 udelay(10);
7737 }
7738 tw32_f(MAC_RX_MODE, tp->rx_mode);
7739 udelay(10);
7740
7741 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7742 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7743 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7744 /* Set drive transmission level to 1.2V */
7745 /* only if the signal pre-emphasis bit is not set */
7746 val = tr32(MAC_SERDES_CFG);
7747 val &= 0xfffff000;
7748 val |= 0x880;
7749 tw32(MAC_SERDES_CFG, val);
7750 }
7751 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7752 tw32(MAC_SERDES_CFG, 0x616000);
7753 }
7754
7755 /* Prevent chip from dropping frames when flow control
7756 * is enabled.
7757 */
7758 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7759
7760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7761 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7762 /* Use hardware link auto-negotiation */
7763 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7764 }
7765
Michael Chand4d2c552006-03-20 17:47:20 -08007766 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7767 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7768 u32 tmp;
7769
7770 tmp = tr32(SERDES_RX_CTRL);
7771 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7772 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7773 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7774 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7775 }
7776
Matt Carlsondd477002008-05-25 23:45:58 -07007777 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7778 if (tp->link_config.phy_is_low_power) {
7779 tp->link_config.phy_is_low_power = 0;
7780 tp->link_config.speed = tp->link_config.orig_speed;
7781 tp->link_config.duplex = tp->link_config.orig_duplex;
7782 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7783 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007784
Matt Carlsondd477002008-05-25 23:45:58 -07007785 err = tg3_setup_phy(tp, 0);
7786 if (err)
7787 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007788
Matt Carlsondd477002008-05-25 23:45:58 -07007789 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7790 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7791 u32 tmp;
7792
7793 /* Clear CRC stats. */
7794 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7795 tg3_writephy(tp, MII_TG3_TEST1,
7796 tmp | MII_TG3_TEST1_CRC_EN);
7797 tg3_readphy(tp, 0x14, &tmp);
7798 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007799 }
7800 }
7801
7802 __tg3_set_rx_mode(tp->dev);
7803
7804 /* Initialize receive rules. */
7805 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7806 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7807 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7808 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7809
Michael Chan4cf78e42005-07-25 12:29:19 -07007810 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007811 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007812 limit = 8;
7813 else
7814 limit = 16;
7815 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7816 limit -= 4;
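	/* The fall-through switch below clears rule/value pairs from slot
	 * (limit - 1) downward; subtracting four when ASF is enabled leaves
	 * the top four slots untouched, presumably because the ASF firmware
	 * installs its own filters there.
	 */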
7817 switch (limit) {
7818 case 16:
7819 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7820 case 15:
7821 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7822 case 14:
7823 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7824 case 13:
7825 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7826 case 12:
7827 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7828 case 11:
7829 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7830 case 10:
7831 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7832 case 9:
7833 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7834 case 8:
7835 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7836 case 7:
7837 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7838 case 6:
7839 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7840 case 5:
7841 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7842 case 4:
7843 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7844 case 3:
7845 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7846 case 2:
7847 case 1:
7848
7849 default:
7850 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007851 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007852
Matt Carlson9ce768e2007-10-11 19:49:11 -07007853 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7854 /* Write our heartbeat update interval to APE. */
7855 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7856 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007857
Linus Torvalds1da177e2005-04-16 15:20:36 -07007858 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7859
Linus Torvalds1da177e2005-04-16 15:20:36 -07007860 return 0;
7861}
7862
7863/* Called at device open time to get the chip ready for
7864 * packet processing. Invoked with tp->lock held.
7865 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007866static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007867{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007868 tg3_switch_clocks(tp);
7869
7870 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7871
Matt Carlson2f751b62008-08-04 23:17:34 -07007872 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007873}
7874
7875#define TG3_STAT_ADD32(PSTAT, REG) \
7876do { u32 __val = tr32(REG); \
7877 (PSTAT)->low += __val; \
7878 if ((PSTAT)->low < __val) \
7879 (PSTAT)->high += 1; \
7880} while (0)
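/* The low word accumulates a 32-bit hardware counter; unsigned wraparound
 * is detected by the "low < __val" test.  For example, low = 0xfffffff0
 * plus __val = 0x20 wraps to 0x10, which is less than 0x20, so one is
 * carried into the high word.
 */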
7881
7882static void tg3_periodic_fetch_stats(struct tg3 *tp)
7883{
7884 struct tg3_hw_stats *sp = tp->hw_stats;
7885
7886 if (!netif_carrier_ok(tp->dev))
7887 return;
7888
7889 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7890 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7891 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7892 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7893 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7894 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7895 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7896 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7897 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7898 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7899 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7900 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7901 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7902
7903 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7904 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7905 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7906 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7907 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7908 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7909 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7910 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7911 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7912 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7913 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7914 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7915 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7916 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007917
7918 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7919 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7920 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007921}
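/* 5705-class chips do not DMA the statistics block to host memory (see the
 * HOSTCC_STATS_BLK setup in tg3_reset_hw()), so tg3_timer() calls the
 * function above once a second to fold the 32-bit MAC counters into the
 * 64-bit counters in tp->hw_stats.
 */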
7922
7923static void tg3_timer(unsigned long __opaque)
7924{
7925 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007926
Michael Chanf475f162006-03-27 23:20:14 -08007927 if (tp->irq_sync)
7928 goto restart_timer;
7929
David S. Millerf47c11e2005-06-24 20:18:35 -07007930 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007931
David S. Millerfac9b832005-05-18 22:46:34 -07007932 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7933 /* All of this garbage is because when using non-tagged
7934 * IRQ status the mailbox/status_block protocol the chip
7935 * uses with the cpu is race prone.
7936 */
7937 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7938 tw32(GRC_LOCAL_CTRL,
7939 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7940 } else {
7941 tw32(HOSTCC_MODE, tp->coalesce_mode |
7942 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007944
David S. Millerfac9b832005-05-18 22:46:34 -07007945 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7946 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007947 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007948 schedule_work(&tp->reset_task);
7949 return;
7950 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007951 }
7952
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953 /* This part only runs once per second. */
7954 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007955 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7956 tg3_periodic_fetch_stats(tp);
7957
Linus Torvalds1da177e2005-04-16 15:20:36 -07007958 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7959 u32 mac_stat;
7960 int phy_event;
7961
7962 mac_stat = tr32(MAC_STATUS);
7963
7964 phy_event = 0;
7965 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7966 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7967 phy_event = 1;
7968 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7969 phy_event = 1;
7970
7971 if (phy_event)
7972 tg3_setup_phy(tp, 0);
7973 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7974 u32 mac_stat = tr32(MAC_STATUS);
7975 int need_setup = 0;
7976
7977 if (netif_carrier_ok(tp->dev) &&
7978 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7979 need_setup = 1;
7980 }
7981 if (! netif_carrier_ok(tp->dev) &&
7982 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7983 MAC_STATUS_SIGNAL_DET))) {
7984 need_setup = 1;
7985 }
7986 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007987 if (!tp->serdes_counter) {
7988 tw32_f(MAC_MODE,
7989 (tp->mac_mode &
7990 ~MAC_MODE_PORT_MODE_MASK));
7991 udelay(40);
7992 tw32_f(MAC_MODE, tp->mac_mode);
7993 udelay(40);
7994 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007995 tg3_setup_phy(tp, 0);
7996 }
Michael Chan747e8f82005-07-25 12:33:22 -07007997 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7998 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007999
8000 tp->timer_counter = tp->timer_multiplier;
8001 }
8002
Michael Chan130b8e42006-09-27 16:00:40 -07008003 /* Heartbeat is only sent once every 2 seconds.
8004 *
8005 * The heartbeat is to tell the ASF firmware that the host
8006 * driver is still alive. In the event that the OS crashes,
8007 * ASF needs to reset the hardware to free up the FIFO space
8008 * that may be filled with rx packets destined for the host.
8009 * If the FIFO is full, ASF will no longer function properly.
8010 *
8011	 * Unintended resets have been reported on real-time kernels,
8012	 * where the timer doesn't always run on time; netpoll has the
8013	 * same problem.
8014 *
8015 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8016 * to check the ring condition when the heartbeat is expiring
8017 * before doing the reset. This will prevent most unintended
8018 * resets.
8019 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07008021 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8022 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07008023 tg3_wait_for_event_ack(tp);
8024
Michael Chanbbadf502006-04-06 21:46:34 -07008025 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07008026 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07008027 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07008028 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07008029 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07008030
8031 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008032 }
8033 tp->asf_counter = tp->asf_multiplier;
8034 }
8035
David S. Millerf47c11e2005-06-24 20:18:35 -07008036 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008037
Michael Chanf475f162006-03-27 23:20:14 -08008038restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07008039 tp->timer.expires = jiffies + tp->timer_offset;
8040 add_timer(&tp->timer);
8041}
8042
Adrian Bunk81789ef2006-03-20 23:00:14 -08008043static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08008044{
David Howells7d12e782006-10-05 14:55:46 +01008045 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008046 unsigned long flags;
8047 struct net_device *dev = tp->dev;
8048
8049 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8050 fn = tg3_msi;
8051 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8052 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008053 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008054 } else {
8055 fn = tg3_interrupt;
8056 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8057 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008058 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008059 }
8060 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8061}
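/* IRQF_SHARED is needed only on the legacy INTx path, where the interrupt
 * line may be shared with other devices; an MSI vector is exclusive to this
 * device.  IRQF_SAMPLE_RANDOM marks the interrupt as an entropy source in
 * both cases.
 */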
8062
Michael Chan79381092005-04-21 17:13:59 -07008063static int tg3_test_interrupt(struct tg3 *tp)
8064{
8065 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07008066 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07008067
Michael Chand4bc3922005-05-29 14:59:20 -07008068 if (!netif_running(dev))
8069 return -ENODEV;
8070
Michael Chan79381092005-04-21 17:13:59 -07008071 tg3_disable_ints(tp);
8072
8073 free_irq(tp->pdev->irq, dev);
8074
8075 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008076 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07008077 if (err)
8078 return err;
8079
Michael Chan38f38432005-09-05 17:53:32 -07008080 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07008081 tg3_enable_ints(tp);
8082
8083 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8084 HOSTCC_MODE_NOW);
8085
8086 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07008087 u32 int_mbox, misc_host_ctrl;
8088
Michael Chan09ee9292005-08-09 20:17:00 -07008089 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8090 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07008091 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8092
8093 if ((int_mbox != 0) ||
8094 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8095 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07008096 break;
Michael Chanb16250e2006-09-27 16:10:14 -07008097 }
8098
Michael Chan79381092005-04-21 17:13:59 -07008099 msleep(10);
8100 }
8101
8102 tg3_disable_ints(tp);
8103
8104 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008105
Michael Chanfcfa0a32006-03-20 22:28:41 -08008106 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008107
8108 if (err)
8109 return err;
8110
Michael Chanb16250e2006-09-27 16:10:14 -07008111 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07008112 return 0;
8113
8114 return -EIO;
8115}
8116
8117/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
8118 * INTx mode is successfully restored.
8119 */
8120static int tg3_test_msi(struct tg3 *tp)
8121{
8122 struct net_device *dev = tp->dev;
8123 int err;
8124 u16 pci_cmd;
8125
8126 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8127 return 0;
8128
8129 /* Turn off SERR reporting in case MSI terminates with Master
8130 * Abort.
8131 */
8132 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8133 pci_write_config_word(tp->pdev, PCI_COMMAND,
8134 pci_cmd & ~PCI_COMMAND_SERR);
8135
8136 err = tg3_test_interrupt(tp);
8137
8138 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8139
8140 if (!err)
8141 return 0;
8142
8143 /* other failures */
8144 if (err != -EIO)
8145 return err;
8146
8147 /* MSI test failed, go back to INTx mode */
8148 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8149 "switching to INTx mode. Please report this failure to "
8150 "the PCI maintainer and include system chipset information.\n",
8151 tp->dev->name);
8152
8153 free_irq(tp->pdev->irq, dev);
8154 pci_disable_msi(tp->pdev);
8155
8156 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8157
Michael Chanfcfa0a32006-03-20 22:28:41 -08008158 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008159 if (err)
8160 return err;
8161
8162 /* Need to reset the chip because the MSI cycle may have terminated
8163 * with Master Abort.
8164 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008165 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008166
Michael Chan944d9802005-05-29 14:57:48 -07008167 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008168 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008169
David S. Millerf47c11e2005-06-24 20:18:35 -07008170 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008171
8172 if (err)
8173 free_irq(tp->pdev->irq, dev);
8174
8175 return err;
8176}
8177
Linus Torvalds1da177e2005-04-16 15:20:36 -07008178static int tg3_open(struct net_device *dev)
8179{
8180 struct tg3 *tp = netdev_priv(dev);
8181 int err;
8182
Michael Chanc49a1562006-12-17 17:07:29 -08008183 netif_carrier_off(tp->dev);
8184
Michael Chanbc1c7562006-03-20 17:48:03 -08008185 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008186 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008187 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008188
8189 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008190
Linus Torvalds1da177e2005-04-16 15:20:36 -07008191 tg3_disable_ints(tp);
8192 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8193
David S. Millerf47c11e2005-06-24 20:18:35 -07008194 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008195
8196 /* The placement of this call is tied
8197 * to the setup and use of Host TX descriptors.
8198 */
8199 err = tg3_alloc_consistent(tp);
8200 if (err)
8201 return err;
8202
Michael Chan7544b092007-05-05 13:08:32 -07008203 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008204 /* All MSI supporting chips should support tagged
8205 * status. Assert that this is the case.
8206 */
8207 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8208 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8209 "Not using MSI.\n", tp->dev->name);
8210 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008211 u32 msi_mode;
8212
8213 msi_mode = tr32(MSGINT_MODE);
8214 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8215 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8216 }
8217 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008218 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008219
8220 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008221 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8222 pci_disable_msi(tp->pdev);
8223 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8224 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008225 tg3_free_consistent(tp);
8226 return err;
8227 }
8228
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008229 napi_enable(&tp->napi);
8230
David S. Millerf47c11e2005-06-24 20:18:35 -07008231 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008232
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008233 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008234 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008235 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008236 tg3_free_rings(tp);
8237 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008238 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8239 tp->timer_offset = HZ;
8240 else
8241 tp->timer_offset = HZ / 10;
8242
8243 BUG_ON(tp->timer_offset > HZ);
8244 tp->timer_counter = tp->timer_multiplier =
8245 (HZ / tp->timer_offset);
8246 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008247 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008248
8249 init_timer(&tp->timer);
8250 tp->timer.expires = jiffies + tp->timer_offset;
8251 tp->timer.data = (unsigned long) tp;
8252 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008253 }
8254
David S. Millerf47c11e2005-06-24 20:18:35 -07008255 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008256
8257 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008258 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008259 free_irq(tp->pdev->irq, dev);
8260 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8261 pci_disable_msi(tp->pdev);
8262 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8263 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008264 tg3_free_consistent(tp);
8265 return err;
8266 }
8267
Michael Chan79381092005-04-21 17:13:59 -07008268 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8269 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008270
Michael Chan79381092005-04-21 17:13:59 -07008271 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008272 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008273
8274 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8275 pci_disable_msi(tp->pdev);
8276 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8277 }
Michael Chan944d9802005-05-29 14:57:48 -07008278 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008279 tg3_free_rings(tp);
8280 tg3_free_consistent(tp);
8281
David S. Millerf47c11e2005-06-24 20:18:35 -07008282 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008283
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008284 napi_disable(&tp->napi);
8285
Michael Chan79381092005-04-21 17:13:59 -07008286 return err;
8287 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008288
8289 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8290 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008291 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008292
Michael Chanb5d37722006-09-27 16:06:21 -07008293 tw32(PCIE_TRANSACTION_CFG,
8294 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008295 }
8296 }
Michael Chan79381092005-04-21 17:13:59 -07008297 }
8298
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008299 tg3_phy_start(tp);
8300
David S. Millerf47c11e2005-06-24 20:18:35 -07008301 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008302
Michael Chan79381092005-04-21 17:13:59 -07008303 add_timer(&tp->timer);
8304 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008305 tg3_enable_ints(tp);
8306
David S. Millerf47c11e2005-06-24 20:18:35 -07008307 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308
8309 netif_start_queue(dev);
8310
8311 return 0;
8312}
8313
8314#if 0
8315/*static*/ void tg3_dump_state(struct tg3 *tp)
8316{
8317 u32 val32, val32_2, val32_3, val32_4, val32_5;
8318 u16 val16;
8319 int i;
8320
8321 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8322 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8323 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8324 val16, val32);
8325
8326 /* MAC block */
8327 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8328 tr32(MAC_MODE), tr32(MAC_STATUS));
8329 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8330 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8331 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8332 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8333 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8334 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8335
8336 /* Send data initiator control block */
8337 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8338 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8339 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8340 tr32(SNDDATAI_STATSCTRL));
8341
8342 /* Send data completion control block */
8343 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8344
8345 /* Send BD ring selector block */
8346 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8347 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8348
8349 /* Send BD initiator control block */
8350 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8351 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8352
8353 /* Send BD completion control block */
8354 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8355
8356 /* Receive list placement control block */
8357 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8358 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8359 printk(" RCVLPC_STATSCTRL[%08x]\n",
8360 tr32(RCVLPC_STATSCTRL));
8361
8362 /* Receive data and receive BD initiator control block */
8363 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8364 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8365
8366 /* Receive data completion control block */
8367 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8368 tr32(RCVDCC_MODE));
8369
8370 /* Receive BD initiator control block */
8371 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8372 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8373
8374 /* Receive BD completion control block */
8375 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8376 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8377
8378 /* Receive list selector control block */
8379 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8380 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8381
8382 /* Mbuf cluster free block */
8383 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8384 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8385
8386 /* Host coalescing control block */
8387 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8388 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8389 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8390 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8391 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8392 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8393 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8394 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8395 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8396 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8397 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8398 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8399
8400 /* Memory arbiter control block */
8401 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8402 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8403
8404 /* Buffer manager control block */
8405 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8406 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8407 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8408 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8409 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8410 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8411 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8412 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8413
8414 /* Read DMA control block */
8415 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8416 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8417
8418 /* Write DMA control block */
8419 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8420 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8421
8422 /* DMA completion block */
8423 printk("DEBUG: DMAC_MODE[%08x]\n",
8424 tr32(DMAC_MODE));
8425
8426 /* GRC block */
8427 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8428 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8429 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8430 tr32(GRC_LOCAL_CTRL));
8431
8432 /* TG3_BDINFOs */
8433 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8434 tr32(RCVDBDI_JUMBO_BD + 0x0),
8435 tr32(RCVDBDI_JUMBO_BD + 0x4),
8436 tr32(RCVDBDI_JUMBO_BD + 0x8),
8437 tr32(RCVDBDI_JUMBO_BD + 0xc));
8438 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8439 tr32(RCVDBDI_STD_BD + 0x0),
8440 tr32(RCVDBDI_STD_BD + 0x4),
8441 tr32(RCVDBDI_STD_BD + 0x8),
8442 tr32(RCVDBDI_STD_BD + 0xc));
8443 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8444 tr32(RCVDBDI_MINI_BD + 0x0),
8445 tr32(RCVDBDI_MINI_BD + 0x4),
8446 tr32(RCVDBDI_MINI_BD + 0x8),
8447 tr32(RCVDBDI_MINI_BD + 0xc));
8448
8449 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8450 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8451 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8452 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8453 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8454 val32, val32_2, val32_3, val32_4);
8455
8456 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8457 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8458 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8459 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8460 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8461 val32, val32_2, val32_3, val32_4);
8462
8463 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8464 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8465 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8466 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8467 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8468 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8469 val32, val32_2, val32_3, val32_4, val32_5);
8470
8471 /* SW status block */
8472 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8473 tp->hw_status->status,
8474 tp->hw_status->status_tag,
8475 tp->hw_status->rx_jumbo_consumer,
8476 tp->hw_status->rx_consumer,
8477 tp->hw_status->rx_mini_consumer,
8478 tp->hw_status->idx[0].rx_producer,
8479 tp->hw_status->idx[0].tx_consumer);
8480
8481 /* SW statistics block */
8482 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8483 ((u32 *)tp->hw_stats)[0],
8484 ((u32 *)tp->hw_stats)[1],
8485 ((u32 *)tp->hw_stats)[2],
8486 ((u32 *)tp->hw_stats)[3]);
8487
8488 /* Mailboxes */
8489 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008490 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8491 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8492 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8493 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008494
8495 /* NIC side send descriptors. */
8496 for (i = 0; i < 6; i++) {
8497 unsigned long txd;
8498
8499 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8500 + (i * sizeof(struct tg3_tx_buffer_desc));
8501 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8502 i,
8503 readl(txd + 0x0), readl(txd + 0x4),
8504 readl(txd + 0x8), readl(txd + 0xc));
8505 }
8506
8507 /* NIC side RX descriptors. */
8508 for (i = 0; i < 6; i++) {
8509 unsigned long rxd;
8510
8511 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8512 + (i * sizeof(struct tg3_rx_buffer_desc));
8513 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8514 i,
8515 readl(rxd + 0x0), readl(rxd + 0x4),
8516 readl(rxd + 0x8), readl(rxd + 0xc));
8517 rxd += (4 * sizeof(u32));
8518 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8519 i,
8520 readl(rxd + 0x0), readl(rxd + 0x4),
8521 readl(rxd + 0x8), readl(rxd + 0xc));
8522 }
8523
8524 for (i = 0; i < 6; i++) {
8525 unsigned long rxd;
8526
8527 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8528 + (i * sizeof(struct tg3_rx_buffer_desc));
8529 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8530 i,
8531 readl(rxd + 0x0), readl(rxd + 0x4),
8532 readl(rxd + 0x8), readl(rxd + 0xc));
8533 rxd += (4 * sizeof(u32));
8534 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8535 i,
8536 readl(rxd + 0x0), readl(rxd + 0x4),
8537 readl(rxd + 0x8), readl(rxd + 0xc));
8538 }
8539}
8540#endif
8541
8542static struct net_device_stats *tg3_get_stats(struct net_device *);
8543static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8544
8545static int tg3_close(struct net_device *dev)
8546{
8547 struct tg3 *tp = netdev_priv(dev);
8548
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008549 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008550 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008551
Linus Torvalds1da177e2005-04-16 15:20:36 -07008552 netif_stop_queue(dev);
8553
8554 del_timer_sync(&tp->timer);
8555
David S. Millerf47c11e2005-06-24 20:18:35 -07008556 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008557#if 0
8558 tg3_dump_state(tp);
8559#endif
8560
8561 tg3_disable_ints(tp);
8562
Michael Chan944d9802005-05-29 14:57:48 -07008563 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008564 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008565 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008566
David S. Millerf47c11e2005-06-24 20:18:35 -07008567 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008568
Michael Chan88b06bc2005-04-21 17:13:25 -07008569 free_irq(tp->pdev->irq, dev);
8570 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8571 pci_disable_msi(tp->pdev);
8572 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008574
8575 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8576 sizeof(tp->net_stats_prev));
8577 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8578 sizeof(tp->estats_prev));
8579
8580 tg3_free_consistent(tp);
8581
Michael Chanbc1c7562006-03-20 17:48:03 -08008582 tg3_set_power_state(tp, PCI_D3hot);
8583
8584 netif_carrier_off(tp->dev);
8585
Linus Torvalds1da177e2005-04-16 15:20:36 -07008586 return 0;
8587}
8588
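/* Hardware counters are 64-bit; on 32-bit hosts an unsigned long can
 * only hold the low word, so the high word is dropped there.
 */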
8589static inline unsigned long get_stat64(tg3_stat64_t *val)
8590{
8591 unsigned long ret;
8592
8593#if (BITS_PER_LONG == 32)
8594 ret = val->low;
8595#else
8596 ret = ((u64)val->high << 32) | ((u64)val->low);
8597#endif
8598 return ret;
8599}
8600
Stefan Buehler816f8b82008-08-15 14:10:54 -07008601static inline u64 get_estat64(tg3_stat64_t *val)
8602{
8603 return ((u64)val->high << 32) | ((u64)val->low);
8604}
8605
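/* 5700/5701 copper parts keep the frame-check-sequence error count in
 * the PHY; read and accumulate it from there.  All other devices use
 * the rx_fcs_errors counter from the MAC statistics block.
 */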
Linus Torvalds1da177e2005-04-16 15:20:36 -07008606static unsigned long calc_crc_errors(struct tg3 *tp)
8607{
8608 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8609
8610 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8611 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008613 u32 val;
8614
David S. Millerf47c11e2005-06-24 20:18:35 -07008615 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008616 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8617 tg3_writephy(tp, MII_TG3_TEST1,
8618 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008619 tg3_readphy(tp, 0x14, &val);
8620 } else
8621 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008622 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008623
8624 tp->phy_crc_errors += val;
8625
8626 return tp->phy_crc_errors;
8627 }
8628
8629 return get_stat64(&hw_stats->rx_fcs_errors);
8630}
8631
8632#define ESTAT_ADD(member) \
8633 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008634 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008635
8636static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8637{
8638 struct tg3_ethtool_stats *estats = &tp->estats;
8639 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8640 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8641
8642 if (!hw_stats)
8643 return old_estats;
8644
8645 ESTAT_ADD(rx_octets);
8646 ESTAT_ADD(rx_fragments);
8647 ESTAT_ADD(rx_ucast_packets);
8648 ESTAT_ADD(rx_mcast_packets);
8649 ESTAT_ADD(rx_bcast_packets);
8650 ESTAT_ADD(rx_fcs_errors);
8651 ESTAT_ADD(rx_align_errors);
8652 ESTAT_ADD(rx_xon_pause_rcvd);
8653 ESTAT_ADD(rx_xoff_pause_rcvd);
8654 ESTAT_ADD(rx_mac_ctrl_rcvd);
8655 ESTAT_ADD(rx_xoff_entered);
8656 ESTAT_ADD(rx_frame_too_long_errors);
8657 ESTAT_ADD(rx_jabbers);
8658 ESTAT_ADD(rx_undersize_packets);
8659 ESTAT_ADD(rx_in_length_errors);
8660 ESTAT_ADD(rx_out_length_errors);
8661 ESTAT_ADD(rx_64_or_less_octet_packets);
8662 ESTAT_ADD(rx_65_to_127_octet_packets);
8663 ESTAT_ADD(rx_128_to_255_octet_packets);
8664 ESTAT_ADD(rx_256_to_511_octet_packets);
8665 ESTAT_ADD(rx_512_to_1023_octet_packets);
8666 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8667 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8668 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8669 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8670 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8671
8672 ESTAT_ADD(tx_octets);
8673 ESTAT_ADD(tx_collisions);
8674 ESTAT_ADD(tx_xon_sent);
8675 ESTAT_ADD(tx_xoff_sent);
8676 ESTAT_ADD(tx_flow_control);
8677 ESTAT_ADD(tx_mac_errors);
8678 ESTAT_ADD(tx_single_collisions);
8679 ESTAT_ADD(tx_mult_collisions);
8680 ESTAT_ADD(tx_deferred);
8681 ESTAT_ADD(tx_excessive_collisions);
8682 ESTAT_ADD(tx_late_collisions);
8683 ESTAT_ADD(tx_collide_2times);
8684 ESTAT_ADD(tx_collide_3times);
8685 ESTAT_ADD(tx_collide_4times);
8686 ESTAT_ADD(tx_collide_5times);
8687 ESTAT_ADD(tx_collide_6times);
8688 ESTAT_ADD(tx_collide_7times);
8689 ESTAT_ADD(tx_collide_8times);
8690 ESTAT_ADD(tx_collide_9times);
8691 ESTAT_ADD(tx_collide_10times);
8692 ESTAT_ADD(tx_collide_11times);
8693 ESTAT_ADD(tx_collide_12times);
8694 ESTAT_ADD(tx_collide_13times);
8695 ESTAT_ADD(tx_collide_14times);
8696 ESTAT_ADD(tx_collide_15times);
8697 ESTAT_ADD(tx_ucast_packets);
8698 ESTAT_ADD(tx_mcast_packets);
8699 ESTAT_ADD(tx_bcast_packets);
8700 ESTAT_ADD(tx_carrier_sense_errors);
8701 ESTAT_ADD(tx_discards);
8702 ESTAT_ADD(tx_errors);
8703
8704 ESTAT_ADD(dma_writeq_full);
8705 ESTAT_ADD(dma_write_prioq_full);
8706 ESTAT_ADD(rxbds_empty);
8707 ESTAT_ADD(rx_discards);
8708 ESTAT_ADD(rx_errors);
8709 ESTAT_ADD(rx_threshold_hit);
8710
8711 ESTAT_ADD(dma_readq_full);
8712 ESTAT_ADD(dma_read_prioq_full);
8713 ESTAT_ADD(tx_comp_queue_full);
8714
8715 ESTAT_ADD(ring_set_send_prod_index);
8716 ESTAT_ADD(ring_status_update);
8717 ESTAT_ADD(nic_irqs);
8718 ESTAT_ADD(nic_avoided_irqs);
8719 ESTAT_ADD(nic_tx_threshold_hit);
8720
8721 return estats;
8722}
8723
8724static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8725{
8726 struct tg3 *tp = netdev_priv(dev);
8727 struct net_device_stats *stats = &tp->net_stats;
8728 struct net_device_stats *old_stats = &tp->net_stats_prev;
8729 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8730
8731 if (!hw_stats)
8732 return old_stats;
8733
8734 stats->rx_packets = old_stats->rx_packets +
8735 get_stat64(&hw_stats->rx_ucast_packets) +
8736 get_stat64(&hw_stats->rx_mcast_packets) +
8737 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008738
Linus Torvalds1da177e2005-04-16 15:20:36 -07008739 stats->tx_packets = old_stats->tx_packets +
8740 get_stat64(&hw_stats->tx_ucast_packets) +
8741 get_stat64(&hw_stats->tx_mcast_packets) +
8742 get_stat64(&hw_stats->tx_bcast_packets);
8743
8744 stats->rx_bytes = old_stats->rx_bytes +
8745 get_stat64(&hw_stats->rx_octets);
8746 stats->tx_bytes = old_stats->tx_bytes +
8747 get_stat64(&hw_stats->tx_octets);
8748
8749 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008750 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008751 stats->tx_errors = old_stats->tx_errors +
8752 get_stat64(&hw_stats->tx_errors) +
8753 get_stat64(&hw_stats->tx_mac_errors) +
8754 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8755 get_stat64(&hw_stats->tx_discards);
8756
8757 stats->multicast = old_stats->multicast +
8758 get_stat64(&hw_stats->rx_mcast_packets);
8759 stats->collisions = old_stats->collisions +
8760 get_stat64(&hw_stats->tx_collisions);
8761
8762 stats->rx_length_errors = old_stats->rx_length_errors +
8763 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8764 get_stat64(&hw_stats->rx_undersize_packets);
8765
8766 stats->rx_over_errors = old_stats->rx_over_errors +
8767 get_stat64(&hw_stats->rxbds_empty);
8768 stats->rx_frame_errors = old_stats->rx_frame_errors +
8769 get_stat64(&hw_stats->rx_align_errors);
8770 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8771 get_stat64(&hw_stats->tx_discards);
8772 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8773 get_stat64(&hw_stats->tx_carrier_sense_errors);
8774
8775 stats->rx_crc_errors = old_stats->rx_crc_errors +
8776 calc_crc_errors(tp);
8777
John W. Linville4f63b872005-09-12 14:43:18 -07008778 stats->rx_missed_errors = old_stats->rx_missed_errors +
8779 get_stat64(&hw_stats->rx_discards);
8780
Linus Torvalds1da177e2005-04-16 15:20:36 -07008781 return stats;
8782}
8783
8784static inline u32 calc_crc(unsigned char *buf, int len)
8785{
8786 u32 reg;
8787 u32 tmp;
8788 int j, k;
8789
8790 reg = 0xffffffff;
8791
8792 for (j = 0; j < len; j++) {
8793 reg ^= buf[j];
8794
8795 for (k = 0; k < 8; k++) {
8796 tmp = reg & 0x01;
8797
8798 reg >>= 1;
8799
8800 if (tmp) {
8801 reg ^= 0xedb88320;
8802 }
8803 }
8804 }
8805
8806 return ~reg;
8807}
8808
8809static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8810{
8811 /* accept or reject all multicast frames */
8812 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8813 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8814 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8815 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8816}
8817
8818static void __tg3_set_rx_mode(struct net_device *dev)
8819{
8820 struct tg3 *tp = netdev_priv(dev);
8821 u32 rx_mode;
8822
8823 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8824 RX_MODE_KEEP_VLAN_TAG);
8825
8826 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8827 * flag clear.
8828 */
8829#if TG3_VLAN_TAG_USED
8830 if (!tp->vlgrp &&
8831 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8832 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8833#else
8834	/* By definition, VLAN is always disabled in this
8835 * case.
8836 */
8837 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8838 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8839#endif
8840
8841 if (dev->flags & IFF_PROMISC) {
8842 /* Promiscuous mode. */
8843 rx_mode |= RX_MODE_PROMISC;
8844 } else if (dev->flags & IFF_ALLMULTI) {
8845 /* Accept all multicast. */
8846 tg3_set_multi (tp, 1);
8847 } else if (dev->mc_count < 1) {
8848 /* Reject all multicast. */
8849 tg3_set_multi (tp, 0);
8850 } else {
8851 /* Accept one or more multicast(s). */
8852 struct dev_mc_list *mclist;
8853 unsigned int i;
8854 u32 mc_filter[4] = { 0, };
8855 u32 regidx;
8856 u32 bit;
8857 u32 crc;
8858
8859 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8860 i++, mclist = mclist->next) {
8861
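			/* The low 7 bits of the inverted CRC select one of
			 * the 128 hash filter bits: bits 6:5 pick the
			 * register, bits 4:0 the bit within it.
			 */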
8862 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8863 bit = ~crc & 0x7f;
8864 regidx = (bit & 0x60) >> 5;
8865 bit &= 0x1f;
8866 mc_filter[regidx] |= (1 << bit);
8867 }
8868
8869 tw32(MAC_HASH_REG_0, mc_filter[0]);
8870 tw32(MAC_HASH_REG_1, mc_filter[1]);
8871 tw32(MAC_HASH_REG_2, mc_filter[2]);
8872 tw32(MAC_HASH_REG_3, mc_filter[3]);
8873 }
8874
8875 if (rx_mode != tp->rx_mode) {
8876 tp->rx_mode = rx_mode;
8877 tw32_f(MAC_RX_MODE, rx_mode);
8878 udelay(10);
8879 }
8880}
8881
8882static void tg3_set_rx_mode(struct net_device *dev)
8883{
8884 struct tg3 *tp = netdev_priv(dev);
8885
Michael Chane75f7c92006-03-20 21:33:26 -08008886 if (!netif_running(dev))
8887 return;
8888
David S. Millerf47c11e2005-06-24 20:18:35 -07008889 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008890 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008891 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008892}
8893
8894#define TG3_REGDUMP_LEN (32 * 1024)
8895
8896static int tg3_get_regs_len(struct net_device *dev)
8897{
8898 return TG3_REGDUMP_LEN;
8899}
8900
8901static void tg3_get_regs(struct net_device *dev,
8902 struct ethtool_regs *regs, void *_p)
8903{
8904 u32 *p = _p;
8905 struct tg3 *tp = netdev_priv(dev);
8906 u8 *orig_p = _p;
8907 int i;
8908
8909 regs->version = 0;
8910
8911 memset(p, 0, TG3_REGDUMP_LEN);
8912
Michael Chanbc1c7562006-03-20 17:48:03 -08008913 if (tp->link_config.phy_is_low_power)
8914 return;
8915
David S. Millerf47c11e2005-06-24 20:18:35 -07008916 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008917
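	/* Copy register contents into the dump buffer at the same offsets
	 * they occupy in the 32K register window.
	 */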
8918#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8919#define GET_REG32_LOOP(base,len) \
8920do { p = (u32 *)(orig_p + (base)); \
8921 for (i = 0; i < len; i += 4) \
8922 __GET_REG32((base) + i); \
8923} while (0)
8924#define GET_REG32_1(reg) \
8925do { p = (u32 *)(orig_p + (reg)); \
8926 __GET_REG32((reg)); \
8927} while (0)
8928
8929 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8930 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8931 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8932 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8933 GET_REG32_1(SNDDATAC_MODE);
8934 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8935 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8936 GET_REG32_1(SNDBDC_MODE);
8937 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8938 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8939 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8940 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8941 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8942 GET_REG32_1(RCVDCC_MODE);
8943 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8944 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8945 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8946 GET_REG32_1(MBFREE_MODE);
8947 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8948 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8949 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8950 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8951 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008952 GET_REG32_1(RX_CPU_MODE);
8953 GET_REG32_1(RX_CPU_STATE);
8954 GET_REG32_1(RX_CPU_PGMCTR);
8955 GET_REG32_1(RX_CPU_HWBKPT);
8956 GET_REG32_1(TX_CPU_MODE);
8957 GET_REG32_1(TX_CPU_STATE);
8958 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008959 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8960 GET_REG32_LOOP(FTQ_RESET, 0x120);
8961 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8962 GET_REG32_1(DMAC_MODE);
8963 GET_REG32_LOOP(GRC_MODE, 0x4c);
8964 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8965 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8966
8967#undef __GET_REG32
8968#undef GET_REG32_LOOP
8969#undef GET_REG32_1
8970
David S. Millerf47c11e2005-06-24 20:18:35 -07008971 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008972}
8973
8974static int tg3_get_eeprom_len(struct net_device *dev)
8975{
8976 struct tg3 *tp = netdev_priv(dev);
8977
8978 return tp->nvram_size;
8979}
8980
8981static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008982static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008983static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008984
8985static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8986{
8987 struct tg3 *tp = netdev_priv(dev);
8988 int ret;
8989 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008990 u32 i, offset, len, b_offset, b_count;
8991 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008992
Michael Chanbc1c7562006-03-20 17:48:03 -08008993 if (tp->link_config.phy_is_low_power)
8994 return -EAGAIN;
8995
Linus Torvalds1da177e2005-04-16 15:20:36 -07008996 offset = eeprom->offset;
8997 len = eeprom->len;
8998 eeprom->len = 0;
8999
9000 eeprom->magic = TG3_EEPROM_MAGIC;
9001
9002 if (offset & 3) {
9003 /* adjustments to start on required 4 byte boundary */
9004 b_offset = offset & 3;
9005 b_count = 4 - b_offset;
9006 if (b_count > len) {
9007 /* i.e. offset=1 len=2 */
9008 b_count = len;
9009 }
Al Virob9fc7dc2007-12-17 22:59:57 -08009010 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009011 if (ret)
9012 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009013 memcpy(data, ((char*)&val) + b_offset, b_count);
9014 len -= b_count;
9015 offset += b_count;
9016 eeprom->len += b_count;
9017 }
9018
9019	/* read bytes up to the last 4 byte boundary */
9020 pd = &data[eeprom->len];
9021 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009022 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009023 if (ret) {
9024 eeprom->len += i;
9025 return ret;
9026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009027 memcpy(pd + i, &val, 4);
9028 }
9029 eeprom->len += i;
9030
9031 if (len & 3) {
9032 /* read last bytes not ending on 4 byte boundary */
9033 pd = &data[eeprom->len];
9034 b_count = len & 3;
9035 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08009036 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009037 if (ret)
9038 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08009039 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009040 eeprom->len += b_count;
9041 }
9042 return 0;
9043}
9044
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009045static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009046
9047static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9048{
9049 struct tg3 *tp = netdev_priv(dev);
9050 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08009051 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009052 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08009053 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009054
Michael Chanbc1c7562006-03-20 17:48:03 -08009055 if (tp->link_config.phy_is_low_power)
9056 return -EAGAIN;
9057
Linus Torvalds1da177e2005-04-16 15:20:36 -07009058 if (eeprom->magic != TG3_EEPROM_MAGIC)
9059 return -EINVAL;
9060
9061 offset = eeprom->offset;
9062 len = eeprom->len;
9063
9064 if ((b_offset = (offset & 3))) {
9065 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08009066 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009067 if (ret)
9068 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009069 len += b_offset;
9070 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07009071 if (len < 4)
9072 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009073 }
9074
9075 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07009076 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009077 /* adjustments to end on required 4 byte boundary */
9078 odd_len = 1;
9079 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08009080 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009081 if (ret)
9082 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009083 }
9084
9085 buf = data;
9086 if (b_offset || odd_len) {
9087 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009088 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009089 return -ENOMEM;
9090 if (b_offset)
9091 memcpy(buf, &start, 4);
9092 if (odd_len)
9093 memcpy(buf+len-4, &end, 4);
9094 memcpy(buf + b_offset, data, eeprom->len);
9095 }
9096
9097 ret = tg3_nvram_write_block(tp, offset, len, buf);
9098
9099 if (buf != data)
9100 kfree(buf);
9101
9102 return ret;
9103}
9104
9105static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9106{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009107 struct tg3 *tp = netdev_priv(dev);
9108
9109 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9110 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9111 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009112 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009113 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009114
Linus Torvalds1da177e2005-04-16 15:20:36 -07009115 cmd->supported = (SUPPORTED_Autoneg);
9116
9117 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9118 cmd->supported |= (SUPPORTED_1000baseT_Half |
9119 SUPPORTED_1000baseT_Full);
9120
Karsten Keilef348142006-05-12 12:49:08 -07009121 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009122 cmd->supported |= (SUPPORTED_100baseT_Half |
9123 SUPPORTED_100baseT_Full |
9124 SUPPORTED_10baseT_Half |
9125 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08009126 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07009127 cmd->port = PORT_TP;
9128 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009129 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07009130 cmd->port = PORT_FIBRE;
9131 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009132
Linus Torvalds1da177e2005-04-16 15:20:36 -07009133 cmd->advertising = tp->link_config.advertising;
9134 if (netif_running(dev)) {
9135 cmd->speed = tp->link_config.active_speed;
9136 cmd->duplex = tp->link_config.active_duplex;
9137 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009138 cmd->phy_address = PHY_ADDR;
9139 cmd->transceiver = 0;
9140 cmd->autoneg = tp->link_config.autoneg;
9141 cmd->maxtxpkt = 0;
9142 cmd->maxrxpkt = 0;
9143 return 0;
9144}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009145
Linus Torvalds1da177e2005-04-16 15:20:36 -07009146static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9147{
9148 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009149
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009150 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9151 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9152 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009153 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009154 }
9155
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009156 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009157 /* These are the only valid advertisement bits allowed. */
9158 if (cmd->autoneg == AUTONEG_ENABLE &&
9159 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9160 ADVERTISED_1000baseT_Full |
9161 ADVERTISED_Autoneg |
9162 ADVERTISED_FIBRE)))
9163 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009164 /* Fiber can only do SPEED_1000. */
9165 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9166 (cmd->speed != SPEED_1000))
9167 return -EINVAL;
9168 /* Copper cannot force SPEED_1000. */
9169 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9170 (cmd->speed == SPEED_1000))
9171 return -EINVAL;
9172 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009173 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009174 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009175
David S. Millerf47c11e2005-06-24 20:18:35 -07009176 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009177
9178 tp->link_config.autoneg = cmd->autoneg;
9179 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009180 tp->link_config.advertising = (cmd->advertising |
9181 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009182 tp->link_config.speed = SPEED_INVALID;
9183 tp->link_config.duplex = DUPLEX_INVALID;
9184 } else {
9185 tp->link_config.advertising = 0;
9186 tp->link_config.speed = cmd->speed;
9187 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009188 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009189
Michael Chan24fcad62006-12-17 17:06:46 -08009190 tp->link_config.orig_speed = tp->link_config.speed;
9191 tp->link_config.orig_duplex = tp->link_config.duplex;
9192 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9193
Linus Torvalds1da177e2005-04-16 15:20:36 -07009194 if (netif_running(dev))
9195 tg3_setup_phy(tp, 1);
9196
David S. Millerf47c11e2005-06-24 20:18:35 -07009197 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009198
Linus Torvalds1da177e2005-04-16 15:20:36 -07009199 return 0;
9200}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009201
Linus Torvalds1da177e2005-04-16 15:20:36 -07009202static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9203{
9204 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009205
Linus Torvalds1da177e2005-04-16 15:20:36 -07009206 strcpy(info->driver, DRV_MODULE_NAME);
9207 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009208 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009209 strcpy(info->bus_info, pci_name(tp->pdev));
9210}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009211
Linus Torvalds1da177e2005-04-16 15:20:36 -07009212static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9213{
9214 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009215
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009216 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9217 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009218 wol->supported = WAKE_MAGIC;
9219 else
9220 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009221 wol->wolopts = 0;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08009222 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9223 device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009224 wol->wolopts = WAKE_MAGIC;
9225 memset(&wol->sopass, 0, sizeof(wol->sopass));
9226}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009227
Linus Torvalds1da177e2005-04-16 15:20:36 -07009228static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9229{
9230 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009231 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009232
Linus Torvalds1da177e2005-04-16 15:20:36 -07009233 if (wol->wolopts & ~WAKE_MAGIC)
9234 return -EINVAL;
9235 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009236 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009237 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009238
David S. Millerf47c11e2005-06-24 20:18:35 -07009239 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009240 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009241 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009242 device_set_wakeup_enable(dp, true);
9243 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009244 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009245 device_set_wakeup_enable(dp, false);
9246 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009247 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009248
Linus Torvalds1da177e2005-04-16 15:20:36 -07009249 return 0;
9250}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009251
Linus Torvalds1da177e2005-04-16 15:20:36 -07009252static u32 tg3_get_msglevel(struct net_device *dev)
9253{
9254 struct tg3 *tp = netdev_priv(dev);
9255 return tp->msg_enable;
9256}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009257
Linus Torvalds1da177e2005-04-16 15:20:36 -07009258static void tg3_set_msglevel(struct net_device *dev, u32 value)
9259{
9260 struct tg3 *tp = netdev_priv(dev);
9261 tp->msg_enable = value;
9262}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009263
Linus Torvalds1da177e2005-04-16 15:20:36 -07009264static int tg3_set_tso(struct net_device *dev, u32 value)
9265{
9266 struct tg3 *tp = netdev_priv(dev);
9267
9268 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9269 if (value)
9270 return -EINVAL;
9271 return 0;
9272 }
Michael Chanb5d37722006-09-27 16:06:21 -07009273 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9274 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009275 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009276 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9278 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9279 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9280 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009281 dev->features |= NETIF_F_TSO_ECN;
9282 } else
9283 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009284 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009285 return ethtool_op_set_tso(dev, value);
9286}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009287
Linus Torvalds1da177e2005-04-16 15:20:36 -07009288static int tg3_nway_reset(struct net_device *dev)
9289{
9290 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009291 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009292
Linus Torvalds1da177e2005-04-16 15:20:36 -07009293 if (!netif_running(dev))
9294 return -EAGAIN;
9295
Michael Chanc94e3942005-09-27 12:12:42 -07009296 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9297 return -EINVAL;
9298
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009299 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9300 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9301 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009302 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009303 } else {
9304 u32 bmcr;
9305
9306 spin_lock_bh(&tp->lock);
9307 r = -EINVAL;
9308 tg3_readphy(tp, MII_BMCR, &bmcr);
9309 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9310 ((bmcr & BMCR_ANENABLE) ||
9311 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9312 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9313 BMCR_ANENABLE);
9314 r = 0;
9315 }
9316 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009317 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009318
Linus Torvalds1da177e2005-04-16 15:20:36 -07009319 return r;
9320}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009321
Linus Torvalds1da177e2005-04-16 15:20:36 -07009322static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9323{
9324 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009325
Linus Torvalds1da177e2005-04-16 15:20:36 -07009326 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9327 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009328 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9329 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9330 else
9331 ering->rx_jumbo_max_pending = 0;
9332
9333 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009334
9335 ering->rx_pending = tp->rx_pending;
9336 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009337 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9338 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9339 else
9340 ering->rx_jumbo_pending = 0;
9341
Linus Torvalds1da177e2005-04-16 15:20:36 -07009342 ering->tx_pending = tp->tx_pending;
9343}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009344
Linus Torvalds1da177e2005-04-16 15:20:36 -07009345static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9346{
9347 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009348 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009349
Linus Torvalds1da177e2005-04-16 15:20:36 -07009350 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9351 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009352 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9353 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009354 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009355 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009356 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009357
Michael Chanbbe832c2005-06-24 20:20:04 -07009358 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009359 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009360 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009361 irq_sync = 1;
9362 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009363
Michael Chanbbe832c2005-06-24 20:20:04 -07009364 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009365
Linus Torvalds1da177e2005-04-16 15:20:36 -07009366 tp->rx_pending = ering->rx_pending;
9367
9368 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9369 tp->rx_pending > 63)
9370 tp->rx_pending = 63;
9371 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9372 tp->tx_pending = ering->tx_pending;
9373
9374 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009375 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009376 err = tg3_restart_hw(tp, 1);
9377 if (!err)
9378 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009379 }
9380
David S. Millerf47c11e2005-06-24 20:18:35 -07009381 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009382
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009383 if (irq_sync && !err)
9384 tg3_phy_start(tp);
9385
Michael Chanb9ec6c12006-07-25 16:37:27 -07009386 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009387}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009388
Linus Torvalds1da177e2005-04-16 15:20:36 -07009389static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9390{
9391 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009392
Linus Torvalds1da177e2005-04-16 15:20:36 -07009393 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009394
9395 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9396 epause->rx_pause = 1;
9397 else
9398 epause->rx_pause = 0;
9399
9400 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9401 epause->tx_pause = 1;
9402 else
9403 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009404}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009405
Linus Torvalds1da177e2005-04-16 15:20:36 -07009406static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9407{
9408 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009409 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009410
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009411 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9412 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9413 return -EAGAIN;
9414
9415 if (epause->autoneg) {
9416 u32 newadv;
9417 struct phy_device *phydev;
9418
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009419 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009420
9421 if (epause->rx_pause) {
9422 if (epause->tx_pause)
9423 newadv = ADVERTISED_Pause;
9424 else
9425 newadv = ADVERTISED_Pause |
9426 ADVERTISED_Asym_Pause;
9427 } else if (epause->tx_pause) {
9428 newadv = ADVERTISED_Asym_Pause;
9429 } else
9430 newadv = 0;
9431
9432 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9433 u32 oldadv = phydev->advertising &
9434 (ADVERTISED_Pause |
9435 ADVERTISED_Asym_Pause);
9436 if (oldadv != newadv) {
9437 phydev->advertising &=
9438 ~(ADVERTISED_Pause |
9439 ADVERTISED_Asym_Pause);
9440 phydev->advertising |= newadv;
9441 err = phy_start_aneg(phydev);
9442 }
9443 } else {
9444 tp->link_config.advertising &=
9445 ~(ADVERTISED_Pause |
9446 ADVERTISED_Asym_Pause);
9447 tp->link_config.advertising |= newadv;
9448 }
9449 } else {
9450 if (epause->rx_pause)
9451 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9452 else
9453 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9454
9455 if (epause->tx_pause)
9456 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9457 else
9458 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9459
9460 if (netif_running(dev))
9461 tg3_setup_flow_control(tp, 0, 0);
9462 }
9463 } else {
9464 int irq_sync = 0;
9465
9466 if (netif_running(dev)) {
9467 tg3_netif_stop(tp);
9468 irq_sync = 1;
9469 }
9470
9471 tg3_full_lock(tp, irq_sync);
9472
9473 if (epause->autoneg)
9474 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9475 else
9476 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9477 if (epause->rx_pause)
9478 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9479 else
9480 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9481 if (epause->tx_pause)
9482 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9483 else
9484 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9485
9486 if (netif_running(dev)) {
9487 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9488 err = tg3_restart_hw(tp, 1);
9489 if (!err)
9490 tg3_netif_start(tp);
9491 }
9492
9493 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009494 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009495
Michael Chanb9ec6c12006-07-25 16:37:27 -07009496 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009498
Linus Torvalds1da177e2005-04-16 15:20:36 -07009499static u32 tg3_get_rx_csum(struct net_device *dev)
9500{
9501 struct tg3 *tp = netdev_priv(dev);
9502 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9503}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009504
Linus Torvalds1da177e2005-04-16 15:20:36 -07009505static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9506{
9507 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009508
Linus Torvalds1da177e2005-04-16 15:20:36 -07009509 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9510 if (data != 0)
9511 return -EINVAL;
9512 return 0;
9513 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009514
David S. Millerf47c11e2005-06-24 20:18:35 -07009515 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009516 if (data)
9517 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9518 else
9519 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009520 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009521
Linus Torvalds1da177e2005-04-16 15:20:36 -07009522 return 0;
9523}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009524
Linus Torvalds1da177e2005-04-16 15:20:36 -07009525static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9526{
9527 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009528
Linus Torvalds1da177e2005-04-16 15:20:36 -07009529 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9530 if (data != 0)
9531 return -EINVAL;
9532 return 0;
9533 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009534
Michael Chanaf36e6b2006-03-23 01:28:06 -08009535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009536 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009540 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009541 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009542 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009543
9544 return 0;
9545}
9546
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009547static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009548{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009549 switch (sset) {
9550 case ETH_SS_TEST:
9551 return TG3_NUM_TEST;
9552 case ETH_SS_STATS:
9553 return TG3_NUM_STATS;
9554 default:
9555 return -EOPNOTSUPP;
9556 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009557}
9558
Linus Torvalds1da177e2005-04-16 15:20:36 -07009559static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9560{
9561 switch (stringset) {
9562 case ETH_SS_STATS:
9563 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9564 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009565 case ETH_SS_TEST:
9566 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9567 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009568 default:
9569 WARN_ON(1); /* we need a WARN() */
9570 break;
9571 }
9572}
9573
Michael Chan4009a932005-09-05 17:52:54 -07009574static int tg3_phys_id(struct net_device *dev, u32 data)
9575{
9576 struct tg3 *tp = netdev_priv(dev);
9577 int i;
9578
9579 if (!netif_running(tp->dev))
9580 return -EAGAIN;
9581
9582 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009583 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009584
9585 for (i = 0; i < (data * 2); i++) {
9586 if ((i % 2) == 0)
9587 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9588 LED_CTRL_1000MBPS_ON |
9589 LED_CTRL_100MBPS_ON |
9590 LED_CTRL_10MBPS_ON |
9591 LED_CTRL_TRAFFIC_OVERRIDE |
9592 LED_CTRL_TRAFFIC_BLINK |
9593 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009594
Michael Chan4009a932005-09-05 17:52:54 -07009595 else
9596 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9597 LED_CTRL_TRAFFIC_OVERRIDE);
9598
9599 if (msleep_interruptible(500))
9600 break;
9601 }
9602 tw32(MAC_LED_CTRL, tp->led_ctrl);
9603 return 0;
9604}
9605
Linus Torvalds1da177e2005-04-16 15:20:36 -07009606static void tg3_get_ethtool_stats (struct net_device *dev,
9607 struct ethtool_stats *estats, u64 *tmp_stats)
9608{
9609 struct tg3 *tp = netdev_priv(dev);
9610 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9611}
9612
Michael Chan566f86a2005-05-29 14:56:58 -07009613#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009614#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9615#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9616#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009617#define NVRAM_SELFBOOT_HW_SIZE 0x20
9618#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009619
9620static int tg3_test_nvram(struct tg3 *tp)
9621{
Al Virob9fc7dc2007-12-17 22:59:57 -08009622 u32 csum, magic;
9623 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009624 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009625
Michael Chan18201802006-03-20 22:29:15 -08009626 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009627 return -EIO;
9628
Michael Chan1b277772006-03-20 22:27:48 -08009629 if (magic == TG3_EEPROM_MAGIC)
9630 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009631 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009632 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9633 TG3_EEPROM_SB_FORMAT_1) {
9634 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9635 case TG3_EEPROM_SB_REVISION_0:
9636 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9637 break;
9638 case TG3_EEPROM_SB_REVISION_2:
9639 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9640 break;
9641 case TG3_EEPROM_SB_REVISION_3:
9642 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9643 break;
9644 default:
9645 return 0;
9646 }
9647 } else
Michael Chan1b277772006-03-20 22:27:48 -08009648 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009649 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9650 size = NVRAM_SELFBOOT_HW_SIZE;
9651 else
Michael Chan1b277772006-03-20 22:27:48 -08009652 return -EIO;
9653
9654 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009655 if (buf == NULL)
9656 return -ENOMEM;
9657
Michael Chan1b277772006-03-20 22:27:48 -08009658 err = -EIO;
9659 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009660 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009661 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009662 }
Michael Chan1b277772006-03-20 22:27:48 -08009663 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009664 goto out;
9665
Michael Chan1b277772006-03-20 22:27:48 -08009666 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009667 magic = swab32(le32_to_cpu(buf[0]));
9668 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009669 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009670 u8 *buf8 = (u8 *) buf, csum8 = 0;
9671
Al Virob9fc7dc2007-12-17 22:59:57 -08009672 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009673 TG3_EEPROM_SB_REVISION_2) {
9674 /* For rev 2, the csum doesn't include the MBA. */
9675 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9676 csum8 += buf8[i];
9677 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9678 csum8 += buf8[i];
9679 } else {
9680 for (i = 0; i < size; i++)
9681 csum8 += buf8[i];
9682 }
Michael Chan1b277772006-03-20 22:27:48 -08009683
Adrian Bunkad96b482006-04-05 22:21:04 -07009684 if (csum8 == 0) {
9685 err = 0;
9686 goto out;
9687 }
9688
9689 err = -EIO;
9690 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009691 }
Michael Chan566f86a2005-05-29 14:56:58 -07009692
Al Virob9fc7dc2007-12-17 22:59:57 -08009693 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009694 TG3_EEPROM_MAGIC_HW) {
9695 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9696 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9697 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009698
9699 /* Separate the parity bits and the data bytes. */
9700 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9701 if ((i == 0) || (i == 8)) {
9702 int l;
9703 u8 msk;
9704
9705 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9706 parity[k++] = buf8[i] & msk;
9707 i++;
9708 }
9709 else if (i == 16) {
9710 int l;
9711 u8 msk;
9712
9713 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9714 parity[k++] = buf8[i] & msk;
9715 i++;
9716
9717 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9718 parity[k++] = buf8[i] & msk;
9719 i++;
9720 }
9721 data[j++] = buf8[i];
9722 }
9723
9724 err = -EIO;
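		/* Each data byte plus its stored parity bit must have odd
		 * parity.
		 */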
9725 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9726 u8 hw8 = hweight8(data[i]);
9727
9728 if ((hw8 & 0x1) && parity[i])
9729 goto out;
9730 else if (!(hw8 & 0x1) && !parity[i])
9731 goto out;
9732 }
9733 err = 0;
9734 goto out;
9735 }
9736
Michael Chan566f86a2005-05-29 14:56:58 -07009737 /* Bootstrap checksum at offset 0x10 */
9738 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009739 if(csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009740 goto out;
9741
9742 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9743 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009744 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009745 goto out;
9746
9747 err = 0;
9748
9749out:
9750 kfree(buf);
9751 return err;
9752}
9753
Michael Chanca430072005-05-29 14:57:23 -07009754#define TG3_SERDES_TIMEOUT_SEC 2
9755#define TG3_COPPER_TIMEOUT_SEC 6
9756
9757static int tg3_test_link(struct tg3 *tp)
9758{
9759 int i, max;
9760
9761 if (!netif_running(tp->dev))
9762 return -ENODEV;
9763
Michael Chan4c987482005-09-05 17:52:38 -07009764 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009765 max = TG3_SERDES_TIMEOUT_SEC;
9766 else
9767 max = TG3_COPPER_TIMEOUT_SEC;
9768
9769 for (i = 0; i < max; i++) {
9770 if (netif_carrier_ok(tp->dev))
9771 return 0;
9772
9773 if (msleep_interruptible(1000))
9774 break;
9775 }
9776
9777 return -EIO;
9778}
9779
Michael Chana71116d2005-05-29 14:58:11 -07009780/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009781static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009782{
Michael Chanb16250e2006-09-27 16:10:14 -07009783 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009784 u32 offset, read_mask, write_mask, val, save_val, read_val;
9785 static struct {
9786 u16 offset;
9787 u16 flags;
9788#define TG3_FL_5705 0x1
9789#define TG3_FL_NOT_5705 0x2
9790#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009791#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009792 u32 read_mask;
9793 u32 write_mask;
9794 } reg_tbl[] = {
9795 /* MAC Control Registers */
9796 { MAC_MODE, TG3_FL_NOT_5705,
9797 0x00000000, 0x00ef6f8c },
9798 { MAC_MODE, TG3_FL_5705,
9799 0x00000000, 0x01ef6b8c },
9800 { MAC_STATUS, TG3_FL_NOT_5705,
9801 0x03800107, 0x00000000 },
9802 { MAC_STATUS, TG3_FL_5705,
9803 0x03800100, 0x00000000 },
9804 { MAC_ADDR_0_HIGH, 0x0000,
9805 0x00000000, 0x0000ffff },
9806 { MAC_ADDR_0_LOW, 0x0000,
9807 0x00000000, 0xffffffff },
9808 { MAC_RX_MTU_SIZE, 0x0000,
9809 0x00000000, 0x0000ffff },
9810 { MAC_TX_MODE, 0x0000,
9811 0x00000000, 0x00000070 },
9812 { MAC_TX_LENGTHS, 0x0000,
9813 0x00000000, 0x00003fff },
9814 { MAC_RX_MODE, TG3_FL_NOT_5705,
9815 0x00000000, 0x000007fc },
9816 { MAC_RX_MODE, TG3_FL_5705,
9817 0x00000000, 0x000007dc },
9818 { MAC_HASH_REG_0, 0x0000,
9819 0x00000000, 0xffffffff },
9820 { MAC_HASH_REG_1, 0x0000,
9821 0x00000000, 0xffffffff },
9822 { MAC_HASH_REG_2, 0x0000,
9823 0x00000000, 0xffffffff },
9824 { MAC_HASH_REG_3, 0x0000,
9825 0x00000000, 0xffffffff },
9826
9827 /* Receive Data and Receive BD Initiator Control Registers. */
9828 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9829 0x00000000, 0xffffffff },
9830 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9831 0x00000000, 0xffffffff },
9832 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9833 0x00000000, 0x00000003 },
9834 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9835 0x00000000, 0xffffffff },
9836 { RCVDBDI_STD_BD+0, 0x0000,
9837 0x00000000, 0xffffffff },
9838 { RCVDBDI_STD_BD+4, 0x0000,
9839 0x00000000, 0xffffffff },
9840 { RCVDBDI_STD_BD+8, 0x0000,
9841 0x00000000, 0xffff0002 },
9842 { RCVDBDI_STD_BD+0xc, 0x0000,
9843 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009844
Michael Chana71116d2005-05-29 14:58:11 -07009845 /* Receive BD Initiator Control Registers. */
9846 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9847 0x00000000, 0xffffffff },
9848 { RCVBDI_STD_THRESH, TG3_FL_5705,
9849 0x00000000, 0x000003ff },
9850 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9851 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009852
Michael Chana71116d2005-05-29 14:58:11 -07009853 /* Host Coalescing Control Registers. */
9854 { HOSTCC_MODE, TG3_FL_NOT_5705,
9855 0x00000000, 0x00000004 },
9856 { HOSTCC_MODE, TG3_FL_5705,
9857 0x00000000, 0x000000f6 },
9858 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9859 0x00000000, 0xffffffff },
9860 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9861 0x00000000, 0x000003ff },
9862 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9863 0x00000000, 0xffffffff },
9864 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9865 0x00000000, 0x000003ff },
9866 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9867 0x00000000, 0xffffffff },
9868 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9869 0x00000000, 0x000000ff },
9870 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9871 0x00000000, 0xffffffff },
9872 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9873 0x00000000, 0x000000ff },
9874 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9875 0x00000000, 0xffffffff },
9876 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9877 0x00000000, 0xffffffff },
9878 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9879 0x00000000, 0xffffffff },
9880 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9881 0x00000000, 0x000000ff },
9882 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9883 0x00000000, 0xffffffff },
9884 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9885 0x00000000, 0x000000ff },
9886 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9887 0x00000000, 0xffffffff },
9888 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9889 0x00000000, 0xffffffff },
9890 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9891 0x00000000, 0xffffffff },
9892 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9893 0x00000000, 0xffffffff },
9894 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9895 0x00000000, 0xffffffff },
9896 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9897 0xffffffff, 0x00000000 },
9898 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9899 0xffffffff, 0x00000000 },
9900
9901 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009902 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009903 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009904 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009905 0x00000000, 0x007fffff },
9906 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9907 0x00000000, 0x0000003f },
9908 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9909 0x00000000, 0x000001ff },
9910 { BUFMGR_MB_HIGH_WATER, 0x0000,
9911 0x00000000, 0x000001ff },
9912 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9913 0xffffffff, 0x00000000 },
9914 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9915 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009916
Michael Chana71116d2005-05-29 14:58:11 -07009917 /* Mailbox Registers */
9918 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9919 0x00000000, 0x000001ff },
9920 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9921 0x00000000, 0x000001ff },
9922 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9923 0x00000000, 0x000007ff },
9924 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9925 0x00000000, 0x000001ff },
9926
9927 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9928 };
9929
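/* Note on reg_tbl flags: TG3_FL_5705 entries are exercised only on
 * 5705-class (5705_PLUS) chips and TG3_FL_NOT_5705 entries only on
 * older chips; TG3_FL_NOT_5788 entries are skipped on 5788 and
 * TG3_FL_NOT_5750 entries on 5750-class chips, matching the
 * is_5705/is_5750 checks in the loop below.
 */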
Michael Chanb16250e2006-09-27 16:10:14 -07009930 is_5705 = is_5750 = 0;
9931 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009932 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009933 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9934 is_5750 = 1;
9935 }
Michael Chana71116d2005-05-29 14:58:11 -07009936
9937 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9938 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9939 continue;
9940
9941 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9942 continue;
9943
9944 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9945 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9946 continue;
9947
Michael Chanb16250e2006-09-27 16:10:14 -07009948 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9949 continue;
9950
Michael Chana71116d2005-05-29 14:58:11 -07009951 offset = (u32) reg_tbl[i].offset;
9952 read_mask = reg_tbl[i].read_mask;
9953 write_mask = reg_tbl[i].write_mask;
9954
9955 /* Save the original register content */
9956 save_val = tr32(offset);
9957
9958 /* Determine the read-only value. */
9959 read_val = save_val & read_mask;
9960
9961 /* Write zero to the register, then make sure the read-only bits
9962 * are not changed and the read/write bits are all zeros.
9963 */
9964 tw32(offset, 0);
9965
9966 val = tr32(offset);
9967
9968 /* Test the read-only and read/write bits. */
9969 if (((val & read_mask) != read_val) || (val & write_mask))
9970 goto out;
9971
9972 /* Write ones to all the bits defined by RdMask and WrMask, then
9973 * make sure the read-only bits are not changed and the
9974 * read/write bits are all ones.
9975 */
9976 tw32(offset, read_mask | write_mask);
9977
9978 val = tr32(offset);
9979
9980 /* Test the read-only bits. */
9981 if ((val & read_mask) != read_val)
9982 goto out;
9983
9984 /* Test the read/write bits. */
9985 if ((val & write_mask) != write_mask)
9986 goto out;
9987
9988 tw32(offset, save_val);
9989 }
9990
9991 return 0;
9992
9993out:
Michael Chan9f88f292006-12-07 00:22:54 -08009994 if (netif_msg_hw(tp))
9995 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9996 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009997 tw32(offset, save_val);
9998 return -EIO;
9999}
10000
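/* Memory test helper: writes each pattern in test_pattern[] to every
 * 32-bit word of the region via tg3_write_mem(), reads it back with
 * tg3_read_mem(), and returns -EIO on the first mismatch.
 */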
Michael Chan7942e1d2005-05-29 14:58:36 -070010001static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10002{
Arjan van de Venf71e1302006-03-03 21:33:57 -050010003 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -070010004 int i;
10005 u32 j;
10006
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +020010007 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -070010008 for (j = 0; j < len; j += 4) {
10009 u32 val;
10010
10011 tg3_write_mem(tp, offset + j, test_pattern[i]);
10012 tg3_read_mem(tp, offset + j, &val);
10013 if (val != test_pattern[i])
10014 return -EIO;
10015 }
10016 }
10017 return 0;
10018}
10019
10020static int tg3_test_memory(struct tg3 *tp)
10021{
10022 static struct mem_entry {
10023 u32 offset;
10024 u32 len;
10025 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -080010026 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -070010027 { 0x00002000, 0x1c000},
10028 { 0xffffffff, 0x00000}
10029 }, mem_tbl_5705[] = {
10030 { 0x00000100, 0x0000c},
10031 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -070010032 { 0x00004000, 0x00800},
10033 { 0x00006000, 0x01000},
10034 { 0x00008000, 0x02000},
10035 { 0x00010000, 0x0e000},
10036 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -080010037 }, mem_tbl_5755[] = {
10038 { 0x00000200, 0x00008},
10039 { 0x00004000, 0x00800},
10040 { 0x00006000, 0x00800},
10041 { 0x00008000, 0x02000},
10042 { 0x00010000, 0x0c000},
10043 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -070010044 }, mem_tbl_5906[] = {
10045 { 0x00000200, 0x00008},
10046 { 0x00004000, 0x00400},
10047 { 0x00006000, 0x00400},
10048 { 0x00008000, 0x01000},
10049 { 0x00010000, 0x01000},
10050 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -070010051 };
10052 struct mem_entry *mem_tbl;
10053 int err = 0;
10054 int i;
10055
Michael Chan79f4d132006-03-20 22:28:57 -080010056 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080010057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070010058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070010059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -080010062 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -070010063 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10064 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -080010065 else
10066 mem_tbl = mem_tbl_5705;
10067 } else
Michael Chan7942e1d2005-05-29 14:58:36 -070010068 mem_tbl = mem_tbl_570x;
10069
10070 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10071 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10072 mem_tbl[i].len)) != 0)
10073 break;
10074 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010075
Michael Chan7942e1d2005-05-29 14:58:36 -070010076 return err;
10077}
10078
Michael Chan9f40dea2005-09-05 17:53:06 -070010079#define TG3_MAC_LOOPBACK 0
10080#define TG3_PHY_LOOPBACK 1
10081
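/* Loopback test worker: puts either the MAC or the PHY into loopback
 * mode, builds a 1514-byte frame addressed to the NIC's own MAC with a
 * counting-byte payload, queues it on the send ring, then polls the
 * status block until the tx consumer and rx producer indices advance,
 * and finally compares the received payload byte for byte.
 */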
10082static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -070010083{
Michael Chan9f40dea2005-09-05 17:53:06 -070010084 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -070010085 u32 desc_idx;
10086 struct sk_buff *skb, *rx_skb;
10087 u8 *tx_data;
10088 dma_addr_t map;
10089 int num_pkts, tx_len, rx_len, i, err;
10090 struct tg3_rx_buffer_desc *desc;
10091
Michael Chan9f40dea2005-09-05 17:53:06 -070010092 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -070010093 /* HW errata - mac loopback fails in some cases on 5780.
10094 * Normal traffic and PHY loopback are not affected by
10095 * errata.
10096 */
10097 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10098 return 0;
10099
Michael Chan9f40dea2005-09-05 17:53:06 -070010100 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010101 MAC_MODE_PORT_INT_LPBACK;
10102 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10103 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -070010104 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10105 mac_mode |= MAC_MODE_PORT_MODE_MII;
10106 else
10107 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -070010108 tw32(MAC_MODE, mac_mode);
10109 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -070010110 u32 val;
10111
Michael Chanb16250e2006-09-27 16:10:14 -070010112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10113 u32 phytest;
10114
10115 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10116 u32 phy;
10117
10118 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10119 phytest | MII_TG3_EPHY_SHADOW_EN);
10120 if (!tg3_readphy(tp, 0x1b, &phy))
10121 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -070010122 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10123 }
Michael Chan5d64ad32006-12-07 00:19:40 -080010124 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10125 } else
10126 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -070010127
Matt Carlson9ef8ca92007-07-11 19:48:29 -070010128 tg3_phy_toggle_automdix(tp, 0);
10129
Michael Chan3f7045c2006-09-27 16:02:29 -070010130 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -070010131 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -080010132
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010133 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -080010134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010135 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010136 mac_mode |= MAC_MODE_PORT_MODE_MII;
10137 } else
10138 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010139
Michael Chanc94e3942005-09-27 12:12:42 -070010140 /* reset to prevent losing 1st rx packet intermittently */
10141 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10142 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10143 udelay(10);
10144 tw32_f(MAC_RX_MODE, tp->rx_mode);
10145 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10147 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10148 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10149 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10150 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010151 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10152 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10153 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010154 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010155 }
10156 else
10157 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010158
10159 err = -EIO;
10160
Michael Chanc76949a2005-05-29 14:58:59 -070010161 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010162 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010163 if (!skb)
10164 return -ENOMEM;
10165
Michael Chanc76949a2005-05-29 14:58:59 -070010166 tx_data = skb_put(skb, tx_len);
10167 memcpy(tx_data, tp->dev->dev_addr, 6);
10168 memset(tx_data + 6, 0x0, 8);
10169
10170 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10171
10172 for (i = 14; i < tx_len; i++)
10173 tx_data[i] = (u8) (i & 0xff);
10174
10175 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10176
10177 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10178 HOSTCC_MODE_NOW);
10179
10180 udelay(10);
10181
10182 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10183
Michael Chanc76949a2005-05-29 14:58:59 -070010184 num_pkts = 0;
10185
Michael Chan9f40dea2005-09-05 17:53:06 -070010186 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010187
Michael Chan9f40dea2005-09-05 17:53:06 -070010188 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010189 num_pkts++;
10190
Michael Chan9f40dea2005-09-05 17:53:06 -070010191 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10192 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010193 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010194
10195 udelay(10);
10196
Michael Chan3f7045c2006-09-27 16:02:29 -070010197 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10198 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010199 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10200 HOSTCC_MODE_NOW);
10201
10202 udelay(10);
10203
10204 tx_idx = tp->hw_status->idx[0].tx_consumer;
10205 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010206 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010207 (rx_idx == (rx_start_idx + num_pkts)))
10208 break;
10209 }
10210
10211 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10212 dev_kfree_skb(skb);
10213
Michael Chan9f40dea2005-09-05 17:53:06 -070010214 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010215 goto out;
10216
10217 if (rx_idx != rx_start_idx + num_pkts)
10218 goto out;
10219
10220 desc = &tp->rx_rcb[rx_start_idx];
10221 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10222 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10223 if (opaque_key != RXD_OPAQUE_RING_STD)
10224 goto out;
10225
10226 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10227 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10228 goto out;
10229
10230 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10231 if (rx_len != tx_len)
10232 goto out;
10233
10234 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10235
10236 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10237 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10238
10239 for (i = 14; i < tx_len; i++) {
10240 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10241 goto out;
10242 }
10243 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010244
Michael Chanc76949a2005-05-29 14:58:59 -070010245 /* tg3_free_rings will unmap and free the rx_skb */
10246out:
10247 return err;
10248}
10249
Michael Chan9f40dea2005-09-05 17:53:06 -070010250#define TG3_MAC_LOOPBACK_FAILED 1
10251#define TG3_PHY_LOOPBACK_FAILED 2
10252#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10253 TG3_PHY_LOOPBACK_FAILED)
10254
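/* Loopback test driver: resets the hardware, and on 5784/5761/5785
 * parts acquires the CPMU mutex and temporarily clears the link-speed
 * and link-aware power-management modes so loopback traffic is not
 * throttled.  MAC loopback is always attempted; PHY loopback is
 * skipped for SerDes parts and for devices managed through phylib.
 * Failures are OR-ed into the returned bitmask.
 */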
10255static int tg3_test_loopback(struct tg3 *tp)
10256{
10257 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010258 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010259
10260 if (!netif_running(tp->dev))
10261 return TG3_LOOPBACK_FAILED;
10262
Michael Chanb9ec6c12006-07-25 16:37:27 -070010263 err = tg3_reset_hw(tp, 1);
10264 if (err)
10265 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010266
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010267 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10269 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010270 int i;
10271 u32 status;
10272
10273 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10274
10275 /* Wait for up to 40 microseconds to acquire lock. */
10276 for (i = 0; i < 4; i++) {
10277 status = tr32(TG3_CPMU_MUTEX_GNT);
10278 if (status == CPMU_MUTEX_GNT_DRIVER)
10279 break;
10280 udelay(10);
10281 }
10282
10283 if (status != CPMU_MUTEX_GNT_DRIVER)
10284 return TG3_LOOPBACK_FAILED;
10285
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010286 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010287 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010288 tw32(TG3_CPMU_CTRL,
10289 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10290 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010291 }
10292
Michael Chan9f40dea2005-09-05 17:53:06 -070010293 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10294 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010295
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10298 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010299 tw32(TG3_CPMU_CTRL, cpmuctrl);
10300
10301 /* Release the mutex */
10302 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10303 }
10304
Matt Carlsondd477002008-05-25 23:45:58 -070010305 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10306 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010307 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10308 err |= TG3_PHY_LOOPBACK_FAILED;
10309 }
10310
10311 return err;
10312}
10313
Michael Chan4cafd3f2005-05-29 14:56:34 -070010314static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10315 u64 *data)
10316{
Michael Chan566f86a2005-05-29 14:56:58 -070010317 struct tg3 *tp = netdev_priv(dev);
10318
Michael Chanbc1c7562006-03-20 17:48:03 -080010319 if (tp->link_config.phy_is_low_power)
10320 tg3_set_power_state(tp, PCI_D0);
10321
Michael Chan566f86a2005-05-29 14:56:58 -070010322 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10323
10324 if (tg3_test_nvram(tp) != 0) {
10325 etest->flags |= ETH_TEST_FL_FAILED;
10326 data[0] = 1;
10327 }
Michael Chanca430072005-05-29 14:57:23 -070010328 if (tg3_test_link(tp) != 0) {
10329 etest->flags |= ETH_TEST_FL_FAILED;
10330 data[1] = 1;
10331 }
Michael Chana71116d2005-05-29 14:58:11 -070010332 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010333 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010334
Michael Chanbbe832c2005-06-24 20:20:04 -070010335 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010336 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010337 tg3_netif_stop(tp);
10338 irq_sync = 1;
10339 }
10340
10341 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010342
10343 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010344 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010345 tg3_halt_cpu(tp, RX_CPU_BASE);
10346 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10347 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010348 if (!err)
10349 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010350
Michael Chand9ab5ad2006-03-20 22:27:35 -080010351 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10352 tg3_phy_reset(tp);
10353
Michael Chana71116d2005-05-29 14:58:11 -070010354 if (tg3_test_registers(tp) != 0) {
10355 etest->flags |= ETH_TEST_FL_FAILED;
10356 data[2] = 1;
10357 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010358 if (tg3_test_memory(tp) != 0) {
10359 etest->flags |= ETH_TEST_FL_FAILED;
10360 data[3] = 1;
10361 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010362 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010363 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010364
David S. Millerf47c11e2005-06-24 20:18:35 -070010365 tg3_full_unlock(tp);
10366
Michael Chand4bc3922005-05-29 14:59:20 -070010367 if (tg3_test_interrupt(tp) != 0) {
10368 etest->flags |= ETH_TEST_FL_FAILED;
10369 data[5] = 1;
10370 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010371
10372 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010373
Michael Chana71116d2005-05-29 14:58:11 -070010374 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10375 if (netif_running(dev)) {
10376 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010377 err2 = tg3_restart_hw(tp, 1);
10378 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010379 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010380 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010381
10382 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010383
10384 if (irq_sync && !err2)
10385 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010386 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010387 if (tp->link_config.phy_is_low_power)
10388 tg3_set_power_state(tp, PCI_D3hot);
10389
Michael Chan4cafd3f2005-05-29 14:56:34 -070010390}
10391
Linus Torvalds1da177e2005-04-16 15:20:36 -070010392static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10393{
10394 struct mii_ioctl_data *data = if_mii(ifr);
10395 struct tg3 *tp = netdev_priv(dev);
10396 int err;
10397
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010398 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10399 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10400 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010401 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010402 }
10403
Linus Torvalds1da177e2005-04-16 15:20:36 -070010404	switch (cmd) {
10405 case SIOCGMIIPHY:
10406 data->phy_id = PHY_ADDR;
10407
10408 /* fallthru */
10409 case SIOCGMIIREG: {
10410 u32 mii_regval;
10411
10412 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10413 break; /* We have no PHY */
10414
Michael Chanbc1c7562006-03-20 17:48:03 -080010415 if (tp->link_config.phy_is_low_power)
10416 return -EAGAIN;
10417
David S. Millerf47c11e2005-06-24 20:18:35 -070010418 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010419 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010420 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010421
10422 data->val_out = mii_regval;
10423
10424 return err;
10425 }
10426
10427 case SIOCSMIIREG:
10428 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10429 break; /* We have no PHY */
10430
10431 if (!capable(CAP_NET_ADMIN))
10432 return -EPERM;
10433
Michael Chanbc1c7562006-03-20 17:48:03 -080010434 if (tp->link_config.phy_is_low_power)
10435 return -EAGAIN;
10436
David S. Millerf47c11e2005-06-24 20:18:35 -070010437 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010438 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010439 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010440
10441 return err;
10442
10443 default:
10444 /* do nothing */
10445 break;
10446 }
10447 return -EOPNOTSUPP;
10448}
10449
10450#if TG3_VLAN_TAG_USED
10451static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10452{
10453 struct tg3 *tp = netdev_priv(dev);
10454
Michael Chan29315e82006-06-29 20:12:30 -070010455 if (netif_running(dev))
10456 tg3_netif_stop(tp);
10457
David S. Millerf47c11e2005-06-24 20:18:35 -070010458 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010459
10460 tp->vlgrp = grp;
10461
10462 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10463 __tg3_set_rx_mode(dev);
10464
Michael Chan29315e82006-06-29 20:12:30 -070010465 if (netif_running(dev))
10466 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010467
10468 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010469}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010470#endif
10471
David S. Miller15f98502005-05-18 22:49:26 -070010472static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10473{
10474 struct tg3 *tp = netdev_priv(dev);
10475
10476 memcpy(ec, &tp->coal, sizeof(*ec));
10477 return 0;
10478}
10479
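/* On 5705 and newer (5705_PLUS) chips the rx/tx_coalesce_usecs_irq and
 * stats_block_coalesce_usecs parameters are not supported: their
 * limits below remain zero, so any nonzero request fails the range
 * checks with -EINVAL.
 */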
Michael Chand244c892005-07-05 14:42:33 -070010480static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10481{
10482 struct tg3 *tp = netdev_priv(dev);
10483 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10484 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10485
10486 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10487 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10488 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10489 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10490 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10491 }
10492
10493 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10494 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10495 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10496 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10497 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10498 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10499 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10500 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10501 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10502 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10503 return -EINVAL;
10504
10505 /* No rx interrupts will be generated if both are zero */
10506 if ((ec->rx_coalesce_usecs == 0) &&
10507 (ec->rx_max_coalesced_frames == 0))
10508 return -EINVAL;
10509
10510 /* No tx interrupts will be generated if both are zero */
10511 if ((ec->tx_coalesce_usecs == 0) &&
10512 (ec->tx_max_coalesced_frames == 0))
10513 return -EINVAL;
10514
10515 /* Only copy relevant parameters, ignore all others. */
10516 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10517 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10518 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10519 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10520 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10521 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10522 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10523 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10524 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10525
10526 if (netif_running(dev)) {
10527 tg3_full_lock(tp, 0);
10528 __tg3_set_coalesce(tp, &tp->coal);
10529 tg3_full_unlock(tp);
10530 }
10531 return 0;
10532}
10533
Jeff Garzik7282d492006-09-13 14:30:00 -040010534static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010535 .get_settings = tg3_get_settings,
10536 .set_settings = tg3_set_settings,
10537 .get_drvinfo = tg3_get_drvinfo,
10538 .get_regs_len = tg3_get_regs_len,
10539 .get_regs = tg3_get_regs,
10540 .get_wol = tg3_get_wol,
10541 .set_wol = tg3_set_wol,
10542 .get_msglevel = tg3_get_msglevel,
10543 .set_msglevel = tg3_set_msglevel,
10544 .nway_reset = tg3_nway_reset,
10545 .get_link = ethtool_op_get_link,
10546 .get_eeprom_len = tg3_get_eeprom_len,
10547 .get_eeprom = tg3_get_eeprom,
10548 .set_eeprom = tg3_set_eeprom,
10549 .get_ringparam = tg3_get_ringparam,
10550 .set_ringparam = tg3_set_ringparam,
10551 .get_pauseparam = tg3_get_pauseparam,
10552 .set_pauseparam = tg3_set_pauseparam,
10553 .get_rx_csum = tg3_get_rx_csum,
10554 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010555 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010556 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010557 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010558 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010559 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010560 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010561 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010562 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010563 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010564 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010565};
10566
10567static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10568{
Michael Chan1b277772006-03-20 22:27:48 -080010569 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010570
10571 tp->nvram_size = EEPROM_CHIP_SIZE;
10572
Michael Chan18201802006-03-20 22:29:15 -080010573 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010574 return;
10575
Michael Chanb16250e2006-09-27 16:10:14 -070010576 if ((magic != TG3_EEPROM_MAGIC) &&
10577 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10578 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010579 return;
10580
10581 /*
10582 * Size the chip by reading offsets at increasing powers of two.
10583 * When we encounter our validation signature, we know the addressing
10584 * has wrapped around, and thus have our chip size.
10585 */
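	/* Example (hypothetical 64 KB part): reads at 0x10, 0x20, ...
	 * return ordinary data until the read at 0x10000 wraps back to
	 * offset 0 and returns the magic signature, leaving nvram_size
	 * at 0x10000.
	 */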
Michael Chan1b277772006-03-20 22:27:48 -080010586 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010587
10588 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010589 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010590 return;
10591
Michael Chan18201802006-03-20 22:29:15 -080010592 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010593 break;
10594
10595 cursize <<= 1;
10596 }
10597
10598 tp->nvram_size = cursize;
10599}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010600
Linus Torvalds1da177e2005-04-16 15:20:36 -070010601static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10602{
10603 u32 val;
10604
Michael Chan18201802006-03-20 22:29:15 -080010605 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010606 return;
10607
10608 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010609 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010610 tg3_get_eeprom_size(tp);
10611 return;
10612 }
10613
Linus Torvalds1da177e2005-04-16 15:20:36 -070010614 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10615 if (val != 0) {
10616 tp->nvram_size = (val >> 16) * 1024;
10617 return;
10618 }
10619 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010620 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010621}
10622
10623static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10624{
10625 u32 nvcfg1;
10626
10627 nvcfg1 = tr32(NVRAM_CFG1);
10628 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10629 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10630 }
10631 else {
10632 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10633 tw32(NVRAM_CFG1, nvcfg1);
10634 }
10635
Michael Chan4c987482005-09-05 17:52:38 -070010636 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010637 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010638 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10639 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10640 tp->nvram_jedecnum = JEDEC_ATMEL;
10641 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10642 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10643 break;
10644 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10645 tp->nvram_jedecnum = JEDEC_ATMEL;
10646 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10647 break;
10648 case FLASH_VENDOR_ATMEL_EEPROM:
10649 tp->nvram_jedecnum = JEDEC_ATMEL;
10650 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10651 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10652 break;
10653 case FLASH_VENDOR_ST:
10654 tp->nvram_jedecnum = JEDEC_ST;
10655 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10656 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10657 break;
10658 case FLASH_VENDOR_SAIFUN:
10659 tp->nvram_jedecnum = JEDEC_SAIFUN;
10660 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10661 break;
10662 case FLASH_VENDOR_SST_SMALL:
10663 case FLASH_VENDOR_SST_LARGE:
10664 tp->nvram_jedecnum = JEDEC_SST;
10665 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10666 break;
10667 }
10668 }
10669 else {
10670 tp->nvram_jedecnum = JEDEC_ATMEL;
10671 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10672 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10673 }
10674}
10675
Michael Chan361b4ac2005-04-21 17:11:21 -070010676static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10677{
10678 u32 nvcfg1;
10679
10680 nvcfg1 = tr32(NVRAM_CFG1);
10681
Michael Chane6af3012005-04-21 17:12:05 -070010682 /* NVRAM protection for TPM */
10683 if (nvcfg1 & (1 << 27))
10684 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10685
Michael Chan361b4ac2005-04-21 17:11:21 -070010686 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10687 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10688 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10689 tp->nvram_jedecnum = JEDEC_ATMEL;
10690 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10691 break;
10692 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10693 tp->nvram_jedecnum = JEDEC_ATMEL;
10694 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10695 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10696 break;
10697 case FLASH_5752VENDOR_ST_M45PE10:
10698 case FLASH_5752VENDOR_ST_M45PE20:
10699 case FLASH_5752VENDOR_ST_M45PE40:
10700 tp->nvram_jedecnum = JEDEC_ST;
10701 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10702 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10703 break;
10704 }
10705
10706 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10707 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10708 case FLASH_5752PAGE_SIZE_256:
10709 tp->nvram_pagesize = 256;
10710 break;
10711 case FLASH_5752PAGE_SIZE_512:
10712 tp->nvram_pagesize = 512;
10713 break;
10714 case FLASH_5752PAGE_SIZE_1K:
10715 tp->nvram_pagesize = 1024;
10716 break;
10717 case FLASH_5752PAGE_SIZE_2K:
10718 tp->nvram_pagesize = 2048;
10719 break;
10720 case FLASH_5752PAGE_SIZE_4K:
10721 tp->nvram_pagesize = 4096;
10722 break;
10723 case FLASH_5752PAGE_SIZE_264:
10724 tp->nvram_pagesize = 264;
10725 break;
10726 }
10727 }
10728 else {
10729 /* For eeprom, set pagesize to maximum eeprom size */
10730 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10731
10732 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10733 tw32(NVRAM_CFG1, nvcfg1);
10734 }
10735}
10736
Michael Chand3c7b882006-03-23 01:28:25 -080010737static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10738{
Matt Carlson989a9d22007-05-05 11:51:05 -070010739 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010740
10741 nvcfg1 = tr32(NVRAM_CFG1);
10742
10743 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010744 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010745 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010746 protect = 1;
10747 }
Michael Chand3c7b882006-03-23 01:28:25 -080010748
Matt Carlson989a9d22007-05-05 11:51:05 -070010749 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10750 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010751 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10752 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10753 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010754 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010755 tp->nvram_jedecnum = JEDEC_ATMEL;
10756 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10757 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10758 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010759 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10760 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010761 tp->nvram_size = (protect ? 0x3e200 :
10762 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010763 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010764 tp->nvram_size = (protect ? 0x1f200 :
10765 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010766 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010767 tp->nvram_size = (protect ? 0x1f200 :
10768 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010769 break;
10770 case FLASH_5752VENDOR_ST_M45PE10:
10771 case FLASH_5752VENDOR_ST_M45PE20:
10772 case FLASH_5752VENDOR_ST_M45PE40:
10773 tp->nvram_jedecnum = JEDEC_ST;
10774 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10775 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10776 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010777 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010778 tp->nvram_size = (protect ?
10779 TG3_NVRAM_SIZE_64KB :
10780 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010781 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010782 tp->nvram_size = (protect ?
10783 TG3_NVRAM_SIZE_64KB :
10784 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010785 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010786 tp->nvram_size = (protect ?
10787 TG3_NVRAM_SIZE_128KB :
10788 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010789 break;
10790 }
10791}
10792
Michael Chan1b277772006-03-20 22:27:48 -080010793static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10794{
10795 u32 nvcfg1;
10796
10797 nvcfg1 = tr32(NVRAM_CFG1);
10798
10799 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10800 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10801 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10802 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10803 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10804 tp->nvram_jedecnum = JEDEC_ATMEL;
10805 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10806 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10807
10808 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10809 tw32(NVRAM_CFG1, nvcfg1);
10810 break;
10811 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10812 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10813 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10814 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10815 tp->nvram_jedecnum = JEDEC_ATMEL;
10816 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10817 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10818 tp->nvram_pagesize = 264;
10819 break;
10820 case FLASH_5752VENDOR_ST_M45PE10:
10821 case FLASH_5752VENDOR_ST_M45PE20:
10822 case FLASH_5752VENDOR_ST_M45PE40:
10823 tp->nvram_jedecnum = JEDEC_ST;
10824 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10825 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10826 tp->nvram_pagesize = 256;
10827 break;
10828 }
10829}
10830
Matt Carlson6b91fa02007-10-10 18:01:09 -070010831static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10832{
10833 u32 nvcfg1, protect = 0;
10834
10835 nvcfg1 = tr32(NVRAM_CFG1);
10836
10837 /* NVRAM protection for TPM */
10838 if (nvcfg1 & (1 << 27)) {
10839 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10840 protect = 1;
10841 }
10842
10843 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10844 switch (nvcfg1) {
10845 case FLASH_5761VENDOR_ATMEL_ADB021D:
10846 case FLASH_5761VENDOR_ATMEL_ADB041D:
10847 case FLASH_5761VENDOR_ATMEL_ADB081D:
10848 case FLASH_5761VENDOR_ATMEL_ADB161D:
10849 case FLASH_5761VENDOR_ATMEL_MDB021D:
10850 case FLASH_5761VENDOR_ATMEL_MDB041D:
10851 case FLASH_5761VENDOR_ATMEL_MDB081D:
10852 case FLASH_5761VENDOR_ATMEL_MDB161D:
10853 tp->nvram_jedecnum = JEDEC_ATMEL;
10854 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10855 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10856 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10857 tp->nvram_pagesize = 256;
10858 break;
10859 case FLASH_5761VENDOR_ST_A_M45PE20:
10860 case FLASH_5761VENDOR_ST_A_M45PE40:
10861 case FLASH_5761VENDOR_ST_A_M45PE80:
10862 case FLASH_5761VENDOR_ST_A_M45PE16:
10863 case FLASH_5761VENDOR_ST_M_M45PE20:
10864 case FLASH_5761VENDOR_ST_M_M45PE40:
10865 case FLASH_5761VENDOR_ST_M_M45PE80:
10866 case FLASH_5761VENDOR_ST_M_M45PE16:
10867 tp->nvram_jedecnum = JEDEC_ST;
10868 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10869 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10870 tp->nvram_pagesize = 256;
10871 break;
10872 }
10873
10874 if (protect) {
10875 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10876 } else {
10877 switch (nvcfg1) {
10878 case FLASH_5761VENDOR_ATMEL_ADB161D:
10879 case FLASH_5761VENDOR_ATMEL_MDB161D:
10880 case FLASH_5761VENDOR_ST_A_M45PE16:
10881 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010882 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010883 break;
10884 case FLASH_5761VENDOR_ATMEL_ADB081D:
10885 case FLASH_5761VENDOR_ATMEL_MDB081D:
10886 case FLASH_5761VENDOR_ST_A_M45PE80:
10887 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010888 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010889 break;
10890 case FLASH_5761VENDOR_ATMEL_ADB041D:
10891 case FLASH_5761VENDOR_ATMEL_MDB041D:
10892 case FLASH_5761VENDOR_ST_A_M45PE40:
10893 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010894 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010895 break;
10896 case FLASH_5761VENDOR_ATMEL_ADB021D:
10897 case FLASH_5761VENDOR_ATMEL_MDB021D:
10898 case FLASH_5761VENDOR_ST_A_M45PE20:
10899 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010900 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010901 break;
10902 }
10903 }
10904}
10905
Michael Chanb5d37722006-09-27 16:06:21 -070010906static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10907{
10908 tp->nvram_jedecnum = JEDEC_ATMEL;
10909 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10910 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10911}
10912
Linus Torvalds1da177e2005-04-16 15:20:36 -070010913/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10914static void __devinit tg3_nvram_init(struct tg3 *tp)
10915{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010916 tw32_f(GRC_EEPROM_ADDR,
10917 (EEPROM_ADDR_FSM_RESET |
10918 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10919 EEPROM_ADDR_CLKPERD_SHIFT)));
10920
Michael Chan9d57f012006-12-07 00:23:25 -080010921 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010922
10923 /* Enable seeprom accesses. */
10924 tw32_f(GRC_LOCAL_CTRL,
10925 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10926 udelay(100);
10927
10928 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10929 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10930 tp->tg3_flags |= TG3_FLAG_NVRAM;
10931
Michael Chanec41c7d2006-01-17 02:40:55 -080010932 if (tg3_nvram_lock(tp)) {
 10933			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10934 "tg3_nvram_init failed.\n", tp->dev->name);
10935 return;
10936 }
Michael Chane6af3012005-04-21 17:12:05 -070010937 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010938
Matt Carlson989a9d22007-05-05 11:51:05 -070010939 tp->nvram_size = 0;
10940
Michael Chan361b4ac2005-04-21 17:11:21 -070010941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10942 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010943 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10944 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010945 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010948 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010949 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10950 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010951 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10952 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010953 else
10954 tg3_get_nvram_info(tp);
10955
Matt Carlson989a9d22007-05-05 11:51:05 -070010956 if (tp->nvram_size == 0)
10957 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010958
Michael Chane6af3012005-04-21 17:12:05 -070010959 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010960 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010961
10962 } else {
10963 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10964
10965 tg3_get_eeprom_size(tp);
10966 }
10967}
10968
10969static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10970 u32 offset, u32 *val)
10971{
10972 u32 tmp;
10973 int i;
10974
10975 if (offset > EEPROM_ADDR_ADDR_MASK ||
10976 (offset % 4) != 0)
10977 return -EINVAL;
10978
10979 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10980 EEPROM_ADDR_DEVID_MASK |
10981 EEPROM_ADDR_READ);
10982 tw32(GRC_EEPROM_ADDR,
10983 tmp |
10984 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10985 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10986 EEPROM_ADDR_ADDR_MASK) |
10987 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10988
Michael Chan9d57f012006-12-07 00:23:25 -080010989 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010990 tmp = tr32(GRC_EEPROM_ADDR);
10991
10992 if (tmp & EEPROM_ADDR_COMPLETE)
10993 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010994 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010995 }
10996 if (!(tmp & EEPROM_ADDR_COMPLETE))
10997 return -EBUSY;
10998
10999 *val = tr32(GRC_EEPROM_DATA);
11000 return 0;
11001}
11002
11003#define NVRAM_CMD_TIMEOUT 10000
11004
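/* Issue an NVRAM command and poll NVRAM_CMD for NVRAM_CMD_DONE,
 * checking every 10 usec for up to NVRAM_CMD_TIMEOUT iterations
 * (roughly 100 ms).  Returns -EBUSY if the command never completes.
 */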
11005static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
11006{
11007 int i;
11008
11009 tw32(NVRAM_CMD, nvram_cmd);
11010 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
11011 udelay(10);
11012 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
11013 udelay(10);
11014 break;
11015 }
11016 }
11017 if (i == NVRAM_CMD_TIMEOUT) {
11018 return -EBUSY;
11019 }
11020 return 0;
11021}
11022
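/* Buffered Atmel flash (e.g. AT45DB011B) is addressed as
 * (page << ATMEL_AT45DB0X1B_PAGE_POS) + byte-within-page rather than
 * linearly.  tg3_nvram_phys_addr() converts a linear NVRAM offset into
 * that native form, and tg3_nvram_logical_addr() below performs the
 * inverse mapping.  For example, assuming a 264-byte page size and a
 * 9-bit page position, linear offset 530 (page 2, byte 2) maps to
 * (2 << 9) + 2 = 0x402.
 */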
Michael Chan18201802006-03-20 22:29:15 -080011023static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
11024{
11025 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11026 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11027 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070011028 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080011029 (tp->nvram_jedecnum == JEDEC_ATMEL))
11030
11031 addr = ((addr / tp->nvram_pagesize) <<
11032 ATMEL_AT45DB0X1B_PAGE_POS) +
11033 (addr % tp->nvram_pagesize);
11034
11035 return addr;
11036}
11037
Michael Chanc4e65752006-03-20 22:29:32 -080011038static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
11039{
11040 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11041 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11042 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070011043 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080011044 (tp->nvram_jedecnum == JEDEC_ATMEL))
11045
11046 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
11047 tp->nvram_pagesize) +
11048 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
11049
11050 return addr;
11051}
11052
Linus Torvalds1da177e2005-04-16 15:20:36 -070011053static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11054{
11055 int ret;
11056
Linus Torvalds1da177e2005-04-16 15:20:36 -070011057 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11058 return tg3_nvram_read_using_eeprom(tp, offset, val);
11059
Michael Chan18201802006-03-20 22:29:15 -080011060 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011061
11062 if (offset > NVRAM_ADDR_MSK)
11063 return -EINVAL;
11064
Michael Chanec41c7d2006-01-17 02:40:55 -080011065 ret = tg3_nvram_lock(tp);
11066 if (ret)
11067 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011068
Michael Chane6af3012005-04-21 17:12:05 -070011069 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011070
11071 tw32(NVRAM_ADDR, offset);
11072 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11073 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11074
11075 if (ret == 0)
11076 *val = swab32(tr32(NVRAM_RDDATA));
11077
Michael Chane6af3012005-04-21 17:12:05 -070011078 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011079
Michael Chan381291b2005-12-13 21:08:21 -080011080 tg3_nvram_unlock(tp);
11081
Linus Torvalds1da177e2005-04-16 15:20:36 -070011082 return ret;
11083}
11084
Al Virob9fc7dc2007-12-17 22:59:57 -080011085static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11086{
11087 u32 v;
11088 int res = tg3_nvram_read(tp, offset, &v);
11089 if (!res)
11090 *val = cpu_to_le32(v);
11091 return res;
11092}
11093
Michael Chan18201802006-03-20 22:29:15 -080011094static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11095{
11096 int err;
11097 u32 tmp;
11098
11099 err = tg3_nvram_read(tp, offset, &tmp);
11100 *val = swab32(tmp);
11101 return err;
11102}
11103
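/* EEPROM (non-NVRAM) write path: each dword is presented through
 * GRC_EEPROM_DATA/GRC_EEPROM_ADDR with EEPROM_ADDR_START |
 * EEPROM_ADDR_WRITE, then EEPROM_ADDR_COMPLETE is polled once per
 * millisecond for up to a second before giving up with -EBUSY.
 */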
Linus Torvalds1da177e2005-04-16 15:20:36 -070011104static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11105 u32 offset, u32 len, u8 *buf)
11106{
11107 int i, j, rc = 0;
11108 u32 val;
11109
11110 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011111 u32 addr;
11112 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011113
11114 addr = offset + i;
11115
11116 memcpy(&data, buf + i, 4);
11117
Al Virob9fc7dc2007-12-17 22:59:57 -080011118 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011119
11120 val = tr32(GRC_EEPROM_ADDR);
11121 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11122
11123 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11124 EEPROM_ADDR_READ);
11125 tw32(GRC_EEPROM_ADDR, val |
11126 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11127 (addr & EEPROM_ADDR_ADDR_MASK) |
11128 EEPROM_ADDR_START |
11129 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011130
Michael Chan9d57f012006-12-07 00:23:25 -080011131 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011132 val = tr32(GRC_EEPROM_ADDR);
11133
11134 if (val & EEPROM_ADDR_COMPLETE)
11135 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011136 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011137 }
11138 if (!(val & EEPROM_ADDR_COMPLETE)) {
11139 rc = -EBUSY;
11140 break;
11141 }
11142 }
11143
11144 return rc;
11145}
11146
11147/* offset and length are dword aligned */
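/* Unbuffered flash requires a read-modify-write of whole pages: the
 * affected page is read into a bounce buffer, the new data is merged
 * in at page_off, the page is erased (write-enable plus ERASE
 * command), and the updated page is then written back one dword at a
 * time with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.
 */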
11148static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11149 u8 *buf)
11150{
11151 int ret = 0;
11152 u32 pagesize = tp->nvram_pagesize;
11153 u32 pagemask = pagesize - 1;
11154 u32 nvram_cmd;
11155 u8 *tmp;
11156
11157 tmp = kmalloc(pagesize, GFP_KERNEL);
11158 if (tmp == NULL)
11159 return -ENOMEM;
11160
11161 while (len) {
11162 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011163 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011164
11165 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011166
Linus Torvalds1da177e2005-04-16 15:20:36 -070011167 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011168 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011169 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011170 break;
11171 }
11172 if (ret)
11173 break;
11174
11175 page_off = offset & pagemask;
11176 size = pagesize;
11177 if (len < size)
11178 size = len;
11179
11180 len -= size;
11181
11182 memcpy(tmp + page_off, buf, size);
11183
11184 offset = offset + (pagesize - page_off);
11185
Michael Chane6af3012005-04-21 17:12:05 -070011186 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011187
11188 /*
11189 * Before we can erase the flash page, we need
11190 * to issue a special "write enable" command.
11191 */
11192 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11193
11194 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11195 break;
11196
11197 /* Erase the target page */
11198 tw32(NVRAM_ADDR, phy_addr);
11199
11200 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11201 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11202
11203 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11204 break;
11205
11206 /* Issue another write enable to start the write. */
11207 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11208
11209 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11210 break;
11211
11212 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011213 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011214
Al Virob9fc7dc2007-12-17 22:59:57 -080011215 data = *((__be32 *) (tmp + j));
11216 /* swab32(le32_to_cpu(data)), actually */
11217 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011218
11219 tw32(NVRAM_ADDR, phy_addr + j);
11220
11221 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11222 NVRAM_CMD_WR;
11223
11224 if (j == 0)
11225 nvram_cmd |= NVRAM_CMD_FIRST;
11226 else if (j == (pagesize - 4))
11227 nvram_cmd |= NVRAM_CMD_LAST;
11228
11229 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11230 break;
11231 }
11232 if (ret)
11233 break;
11234 }
11235
11236 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11237 tg3_nvram_exec_cmd(tp, nvram_cmd);
11238
11239 kfree(tmp);
11240
11241 return ret;
11242}
11243
11244/* offset and length are dword aligned */
11245static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11246 u8 *buf)
11247{
11248 int i, ret = 0;
11249
11250 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011251 u32 page_off, phy_addr, nvram_cmd;
11252 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011253
11254 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011255 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011256
11257 page_off = offset % tp->nvram_pagesize;
11258
Michael Chan18201802006-03-20 22:29:15 -080011259 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011260
11261 tw32(NVRAM_ADDR, phy_addr);
11262
11263 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11264
11265 if ((page_off == 0) || (i == 0))
11266 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011267 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011268 nvram_cmd |= NVRAM_CMD_LAST;
11269
11270 if (i == (len - 4))
11271 nvram_cmd |= NVRAM_CMD_LAST;
11272
Michael Chan4c987482005-09-05 17:52:38 -070011273 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011274 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011275 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011276 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011277 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011278 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011279 (tp->nvram_jedecnum == JEDEC_ST) &&
11280 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011281
11282 if ((ret = tg3_nvram_exec_cmd(tp,
11283 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11284 NVRAM_CMD_DONE)))
11285
11286 break;
11287 }
11288 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11289 /* We always do complete word writes to eeprom. */
11290 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11291 }
11292
11293 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11294 break;
11295 }
11296 return ret;
11297}
11298
11299/* offset and length are dword aligned */
11300static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11301{
11302 int ret;
11303
Linus Torvalds1da177e2005-04-16 15:20:36 -070011304 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011305 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11306 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011307 udelay(40);
11308 }
11309
11310 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11311 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11312 }
11313 else {
11314 u32 grc_mode;
11315
Michael Chanec41c7d2006-01-17 02:40:55 -080011316 ret = tg3_nvram_lock(tp);
11317 if (ret)
11318 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011319
Michael Chane6af3012005-04-21 17:12:05 -070011320 tg3_enable_nvram_access(tp);
11321 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11322 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011323 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011324
11325 grc_mode = tr32(GRC_MODE);
11326 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11327
11328 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11329 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11330
11331 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11332 buf);
11333 }
11334 else {
11335 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11336 buf);
11337 }
11338
11339 grc_mode = tr32(GRC_MODE);
11340 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11341
Michael Chane6af3012005-04-21 17:12:05 -070011342 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011343 tg3_nvram_unlock(tp);
11344 }
11345
11346 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011347 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011348 udelay(40);
11349 }
11350
11351 return ret;
11352}
11353
11354struct subsys_tbl_ent {
11355 u16 subsys_vendor, subsys_devid;
11356 u32 phy_id;
11357};
11358
11359static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11360 /* Broadcom boards. */
11361 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11362 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11363 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11364 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11365 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11366 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11367 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11368 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11369 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11370 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11371 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11372
11373 /* 3com boards. */
11374 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11375 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11376 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11377 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11378 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11379
11380 /* DELL boards. */
11381 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11382 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11383 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11384 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11385
11386 /* Compaq boards. */
11387 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11388 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11389 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11390 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11391 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11392
11393 /* IBM boards. */
11394 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11395};
11396
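/* Map this board's PCI subsystem vendor/device IDs to an entry in the
 * table above; returns NULL if the board is not listed.
 */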
11397static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11398{
11399 int i;
11400
11401 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11402 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11403 tp->pdev->subsystem_vendor) &&
11404 (subsys_id_to_phy_id[i].subsys_devid ==
11405 tp->pdev->subsystem_device))
11406 return &subsys_id_to_phy_id[i];
11407 }
11408 return NULL;
11409}
11410
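/* Read the hardware configuration left in NIC SRAM by the bootcode
 * (or in the VCPU shadow register on 5906 parts) and translate it into
 * the corresponding tg3 flags, LED mode and default PHY ID.
 */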
Michael Chan7d0c41e2005-04-21 17:06:20 -070011411static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011412{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011413 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011414 u16 pmcsr;
11415
11416 /* On some early chips the SRAM cannot be accessed in D3hot state,
11417	 * so we need to make sure we're in D0.
11418 */
11419 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11420 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11421 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11422 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011423
11424 /* Make sure register accesses (indirect or otherwise)
11425 * will function correctly.
11426 */
11427 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11428 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011429
David S. Millerf49639e2006-06-09 11:58:36 -070011430 /* The memory arbiter has to be enabled in order for SRAM accesses
11431 * to succeed. Normally on powerup the tg3 chip firmware will make
11432 * sure it is enabled, but other entities such as system netboot
11433 * code might disable it.
11434 */
11435 val = tr32(MEMARB_MODE);
11436 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11437
Linus Torvalds1da177e2005-04-16 15:20:36 -070011438 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011439 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11440
Gary Zambranoa85feb82007-05-05 11:52:19 -070011441	/* Assume an onboard, WOL-capable device by default. */
11442 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011443
Michael Chanb5d37722006-09-27 16:06:21 -070011444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011445 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011446 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011447 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11448 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011449 val = tr32(VCPU_CFGSHDW);
11450 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011451 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011452 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011453 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11454 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011455 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011456 goto done;
Michael Chanb5d37722006-09-27 16:06:21 -070011457 }
11458
Linus Torvalds1da177e2005-04-16 15:20:36 -070011459 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11460 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11461 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011462 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011463 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011464
11465 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11466 tp->nic_sram_data_cfg = nic_cfg;
11467
11468 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11469 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11470 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11471 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11472 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11473 (ver > 0) && (ver < 0x100))
11474 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11475
Matt Carlsona9daf362008-05-25 23:49:44 -070011476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11477 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11478
Linus Torvalds1da177e2005-04-16 15:20:36 -070011479 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11480 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11481 eeprom_phy_serdes = 1;
11482
11483 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11484 if (nic_phy_id != 0) {
11485 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11486 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11487
11488 eeprom_phy_id = (id1 >> 16) << 10;
11489 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11490 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11491 } else
11492 eeprom_phy_id = 0;
11493
Michael Chan7d0c41e2005-04-21 17:06:20 -070011494 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011495 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011496 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011497 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11498 else
11499 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11500 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011501
John W. Linvillecbf46852005-04-21 17:01:29 -070011502 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011503 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11504 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011505 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011506 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11507
11508 switch (led_cfg) {
11509 default:
11510 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11511 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11512 break;
11513
11514 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11515 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11516 break;
11517
11518 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11519 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011520
11521 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11522			 * read from some older 5700/5701 bootcode.
11523 */
11524 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11525 ASIC_REV_5700 ||
11526 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11527 ASIC_REV_5701)
11528 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11529
Linus Torvalds1da177e2005-04-16 15:20:36 -070011530 break;
11531
11532 case SHASTA_EXT_LED_SHARED:
11533 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11534 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11535 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11536 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11537 LED_CTRL_MODE_PHY_2);
11538 break;
11539
11540 case SHASTA_EXT_LED_MAC:
11541 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11542 break;
11543
11544 case SHASTA_EXT_LED_COMBO:
11545 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11546 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11547 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11548 LED_CTRL_MODE_PHY_2);
11549 break;
11550
Stephen Hemminger855e1112008-04-16 16:37:28 -070011551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011552
11553 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11555 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11556 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11557
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011558 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11559 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011560
Michael Chan9d26e212006-12-07 00:21:14 -080011561 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011562 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011563 if ((tp->pdev->subsystem_vendor ==
11564 PCI_VENDOR_ID_ARIMA) &&
11565 (tp->pdev->subsystem_device == 0x205a ||
11566 tp->pdev->subsystem_device == 0x2063))
11567 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11568 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011569 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011570 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011572
11573 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11574 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011575 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011576 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11577 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011578
11579 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11580 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011581 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011582
Gary Zambranoa85feb82007-05-05 11:52:19 -070011583 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11584 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11585 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011586
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011587 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011588 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
Matt Carlson0527ba32007-10-10 18:03:30 -070011589 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11590
Linus Torvalds1da177e2005-04-16 15:20:36 -070011591 if (cfg2 & (1 << 17))
11592 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11593
11594		/* SerDes signal pre-emphasis in register 0x590 is set
11595		 * by the bootcode if bit 18 is set. */
11596 if (cfg2 & (1 << 18))
11597 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011598
11599 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11600 u32 cfg3;
11601
11602 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11603 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11604 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11605 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011606
11607 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11608 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11609 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11610 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11611 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11612 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011613 }
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011614done:
11615 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11616 device_set_wakeup_enable(&tp->pdev->dev,
11617 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011618}
11619
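/* Start an OTP controller command and poll up to 1 ms for completion. */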
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011620static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11621{
11622 int i;
11623 u32 val;
11624
11625 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11626 tw32(OTP_CTRL, cmd);
11627
11628 /* Wait for up to 1 ms for command to execute. */
11629 for (i = 0; i < 100; i++) {
11630 val = tr32(OTP_STATUS);
11631 if (val & OTP_STATUS_CMD_DONE)
11632 break;
11633 udelay(10);
11634 }
11635
11636 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11637}
11638
11639/* Read the gphy configuration from the OTP region of the chip. The gphy
11640 * configuration is a 32-bit value that straddles the alignment boundary.
11641 * We do two 32-bit reads and then shift and merge the results.
11642 */
11643static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11644{
11645 u32 bhalf_otp, thalf_otp;
11646
11647 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11648
11649 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11650 return 0;
11651
11652 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11653
11654 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11655 return 0;
11656
11657 thalf_otp = tr32(OTP_READ_DATA);
11658
11659 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11660
11661 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11662 return 0;
11663
11664 bhalf_otp = tr32(OTP_READ_DATA);
11665
11666 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11667}
11668
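/* Determine the PHY ID, either from the MII ID registers, the value cached
 * by tg3_get_eeprom_hw_cfg(), or the subsystem-ID table, and then make sure
 * the PHY is advertising a sane set of autonegotiation modes.
 */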
Michael Chan7d0c41e2005-04-21 17:06:20 -070011669static int __devinit tg3_phy_probe(struct tg3 *tp)
11670{
11671 u32 hw_phy_id_1, hw_phy_id_2;
11672 u32 hw_phy_id, hw_phy_id_masked;
11673 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011674
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011675 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11676 return tg3_phy_init(tp);
11677
Linus Torvalds1da177e2005-04-16 15:20:36 -070011678 /* Reading the PHY ID register can conflict with ASF
11679	 * firmware access to the PHY hardware.
11680 */
11681 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011682 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11683 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011684 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11685 } else {
11686 /* Now read the physical PHY_ID from the chip and verify
11687 * that it is sane. If it doesn't look good, we fall back
11688		 * to either the hard-coded table based PHY_ID or, failing
11689		 * that, the value found in the eeprom area.
11690 */
11691 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11692 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11693
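		/* Pack the two MII PHY ID registers into the driver's
		 * internal PHY_ID format.
		 */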
11694 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11695 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11696 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11697
11698 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11699 }
11700
11701 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11702 tp->phy_id = hw_phy_id;
11703 if (hw_phy_id_masked == PHY_ID_BCM8002)
11704 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011705 else
11706 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011707 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011708 if (tp->phy_id != PHY_ID_INVALID) {
11709 /* Do nothing, phy ID already set up in
11710 * tg3_get_eeprom_hw_cfg().
11711 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011712 } else {
11713 struct subsys_tbl_ent *p;
11714
11715 /* No eeprom signature? Try the hardcoded
11716 * subsys device table.
11717 */
11718 p = lookup_by_subsys(tp);
11719 if (!p)
11720 return -ENODEV;
11721
11722 tp->phy_id = p->phy_id;
11723 if (!tp->phy_id ||
11724 tp->phy_id == PHY_ID_BCM8002)
11725 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11726 }
11727 }
11728
Michael Chan747e8f82005-07-25 12:33:22 -070011729 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011730 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011731 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011732 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011733
11734 tg3_readphy(tp, MII_BMSR, &bmsr);
11735 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11736 (bmsr & BMSR_LSTATUS))
11737 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011738
Linus Torvalds1da177e2005-04-16 15:20:36 -070011739 err = tg3_phy_reset(tp);
11740 if (err)
11741 return err;
11742
11743 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11744 ADVERTISE_100HALF | ADVERTISE_100FULL |
11745 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11746 tg3_ctrl = 0;
11747 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11748 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11749 MII_TG3_CTRL_ADV_1000_FULL);
11750 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11751 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11752 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11753 MII_TG3_CTRL_ENABLE_AS_MASTER);
11754 }
11755
Michael Chan3600d912006-12-07 00:21:48 -080011756 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11757 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11758 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11759 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011760 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11761
11762 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11763 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11764
11765 tg3_writephy(tp, MII_BMCR,
11766 BMCR_ANENABLE | BMCR_ANRESTART);
11767 }
11768 tg3_phy_set_wirespeed(tp);
11769
11770 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11771 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11772 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11773 }
11774
11775skip_phy_reset:
11776 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11777 err = tg3_init_5401phy_dsp(tp);
11778 if (err)
11779 return err;
11780 }
11781
11782 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11783 err = tg3_init_5401phy_dsp(tp);
11784 }
11785
Michael Chan747e8f82005-07-25 12:33:22 -070011786 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011787 tp->link_config.advertising =
11788 (ADVERTISED_1000baseT_Half |
11789 ADVERTISED_1000baseT_Full |
11790 ADVERTISED_Autoneg |
11791 ADVERTISED_FIBRE);
11792 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11793 tp->link_config.advertising &=
11794 ~(ADVERTISED_1000baseT_Half |
11795 ADVERTISED_1000baseT_Full);
11796
11797 return err;
11798}
11799
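/* Extract the board part number from the VPD area in NVRAM, or via the
 * PCI VPD capability when the NVRAM signature is not the expected magic.
 */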
11800static void __devinit tg3_read_partno(struct tg3 *tp)
11801{
11802 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011803 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011804 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011805
Michael Chan18201802006-03-20 22:29:15 -080011806 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011807 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011808
Michael Chan18201802006-03-20 22:29:15 -080011809 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011810 for (i = 0; i < 256; i += 4) {
11811 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011812
Michael Chan1b277772006-03-20 22:27:48 -080011813 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11814 goto out_not_found;
11815
11816 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11817 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11818 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11819 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11820 }
11821 } else {
11822 int vpd_cap;
11823
11824 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11825 for (i = 0; i < 256; i += 4) {
11826 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011827 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011828 u16 tmp16;
11829
11830 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11831 i);
11832 while (j++ < 100) {
11833 pci_read_config_word(tp->pdev, vpd_cap +
11834 PCI_VPD_ADDR, &tmp16);
11835 if (tmp16 & 0x8000)
11836 break;
11837 msleep(1);
11838 }
David S. Millerf49639e2006-06-09 11:58:36 -070011839 if (!(tmp16 & 0x8000))
11840 goto out_not_found;
11841
Michael Chan1b277772006-03-20 22:27:48 -080011842 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11843 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011844 v = cpu_to_le32(tmp);
11845 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011846 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011847 }
11848
11849 /* Now parse and find the part number. */
Michael Chanaf2c6a42006-11-07 14:57:51 -080011850 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011851 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011852 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011853
11854 if (val == 0x82 || val == 0x91) {
11855 i = (i + 3 +
11856 (vpd_data[i + 1] +
11857 (vpd_data[i + 2] << 8)));
11858 continue;
11859 }
11860
11861 if (val != 0x90)
11862 goto out_not_found;
11863
11864 block_end = (i + 3 +
11865 (vpd_data[i + 1] +
11866 (vpd_data[i + 2] << 8)));
11867 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011868
11869 if (block_end > 256)
11870 goto out_not_found;
11871
11872 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011873 if (vpd_data[i + 0] == 'P' &&
11874 vpd_data[i + 1] == 'N') {
11875 int partno_len = vpd_data[i + 2];
11876
Michael Chanaf2c6a42006-11-07 14:57:51 -080011877 i += 3;
11878 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011879 goto out_not_found;
11880
11881 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011882 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011883
11884 /* Success. */
11885 return;
11886 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011887 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011888 }
11889
11890 /* Part number not found. */
11891 goto out_not_found;
11892 }
11893
11894out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11896 strcpy(tp->board_part_number, "BCM95906");
11897 else
11898 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011899}
11900
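/* Sanity check the firmware image header at the given NVRAM offset. */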
Matt Carlson9c8a6202007-10-21 16:16:08 -070011901static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11902{
11903 u32 val;
11904
11905 if (tg3_nvram_read_swab(tp, offset, &val) ||
11906 (val & 0xfc000000) != 0x0c000000 ||
11907 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11908 val != 0)
11909 return 0;
11910
11911 return 1;
11912}
11913
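/* Build the firmware version string in tp->fw_ver from the bootcode image
 * in NVRAM, appending the ASF initialization version when ASF is enabled.
 */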
Michael Chanc4e65752006-03-20 22:29:32 -080011914static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11915{
11916 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011917 u32 ver_offset;
11918 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011919
11920 if (tg3_nvram_read_swab(tp, 0, &val))
11921 return;
11922
11923 if (val != TG3_EEPROM_MAGIC)
11924 return;
11925
11926 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11927 tg3_nvram_read_swab(tp, 0x4, &start))
11928 return;
11929
11930 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011931
11932 if (!tg3_fw_img_is_valid(tp, offset) ||
11933 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011934 return;
11935
Matt Carlson9c8a6202007-10-21 16:16:08 -070011936 offset = offset + ver_offset - start;
11937 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011938 __le32 v;
11939 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011940 return;
11941
Al Virob9fc7dc2007-12-17 22:59:57 -080011942 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011943 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011944
11945 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011946 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011947 return;
11948
11949 for (offset = TG3_NVM_DIR_START;
11950 offset < TG3_NVM_DIR_END;
11951 offset += TG3_NVM_DIRENT_SIZE) {
11952 if (tg3_nvram_read_swab(tp, offset, &val))
11953 return;
11954
11955 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11956 break;
11957 }
11958
11959 if (offset == TG3_NVM_DIR_END)
11960 return;
11961
11962 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11963 start = 0x08000000;
11964 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11965 return;
11966
11967 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11968 !tg3_fw_img_is_valid(tp, offset) ||
11969 tg3_nvram_read_swab(tp, offset + 8, &val))
11970 return;
11971
11972 offset += val - start;
11973
11974 bcnt = strlen(tp->fw_ver);
11975
11976 tp->fw_ver[bcnt++] = ',';
11977 tp->fw_ver[bcnt++] = ' ';
11978
11979 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011980 __le32 v;
11981 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011982 return;
11983
Al Virob9fc7dc2007-12-17 22:59:57 -080011984 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011985
Al Virob9fc7dc2007-12-17 22:59:57 -080011986 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11987 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011988 break;
11989 }
11990
Al Virob9fc7dc2007-12-17 22:59:57 -080011991 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11992 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011993 }
11994
11995 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011996}
11997
Michael Chan7544b092007-05-05 13:08:32 -070011998static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11999
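/* Probe the chip revision, bus type and assorted hardware erratum flags,
 * and choose the register access methods used by the rest of the driver.
 */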
Linus Torvalds1da177e2005-04-16 15:20:36 -070012000static int __devinit tg3_get_invariants(struct tg3 *tp)
12001{
12002 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012003 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12004 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070012005 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12006 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070012007 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12008 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070012009 { },
12010 };
12011 u32 misc_ctrl_reg;
12012 u32 cacheline_sz_reg;
12013 u32 pci_state_reg, grc_misc_cfg;
12014 u32 val;
12015 u16 pci_cmd;
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012016 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012017
Linus Torvalds1da177e2005-04-16 15:20:36 -070012018 /* Force memory write invalidate off. If we leave it on,
12019 * then on 5700_BX chips we have to enable a workaround.
12020 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12021 * to match the cacheline size. The Broadcom driver have this
12022	 * to match the cacheline size. The Broadcom driver has this
12023	 * workaround but turns MWI off all the time, so it never uses
12024	 * it. This seems to suggest that the workaround is insufficient.
12025 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12026 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12027 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12028
12029 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12030 * has the register indirect write enable bit set before
12031 * we try to access any of the MMIO registers. It is also
12032 * critical that the PCI-X hw workaround situation is decided
12033 * before that as well.
12034 */
12035 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12036 &misc_ctrl_reg);
12037
12038 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12039 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070012040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12041 u32 prod_id_asic_rev;
12042
12043 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12044 &prod_id_asic_rev);
12045 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
12046 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012047
Michael Chanff645be2005-04-21 17:09:53 -070012048 /* Wrong chip ID in 5752 A0. This code can be removed later
12049 * as A0 is not in production.
12050 */
12051 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12052 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12053
Michael Chan68929142005-08-09 20:17:14 -070012054 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12055 * we need to disable memory and use config. cycles
12056 * only to access all registers. The 5702/03 chips
12057 * can mistakenly decode the special cycles from the
12058 * ICH chipsets as memory write cycles, causing corruption
12059 * of register and memory space. Only certain ICH bridges
12060 * will drive special cycles with non-zero data during the
12061 * address phase which can fall within the 5703's address
12062 * range. This is not an ICH bug as the PCI spec allows
12063 * non-zero address during special cycles. However, only
12064 * these ICH bridges are known to drive non-zero addresses
12065 * during special cycles.
12066 *
12067 * Since special cycles do not cross PCI bridges, we only
12068 * enable this workaround if the 5703 is on the secondary
12069 * bus of these ICH bridges.
12070 */
12071 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12072 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12073 static struct tg3_dev_id {
12074 u32 vendor;
12075 u32 device;
12076 u32 rev;
12077 } ich_chipsets[] = {
12078 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12079 PCI_ANY_ID },
12080 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12081 PCI_ANY_ID },
12082 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12083 0xa },
12084 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12085 PCI_ANY_ID },
12086 { },
12087 };
12088 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12089 struct pci_dev *bridge = NULL;
12090
12091 while (pci_id->vendor != 0) {
12092 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12093 bridge);
12094 if (!bridge) {
12095 pci_id++;
12096 continue;
12097 }
12098 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070012099 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070012100 continue;
12101 }
12102 if (bridge->subordinate &&
12103 (bridge->subordinate->number ==
12104 tp->pdev->bus->number)) {
12105
12106 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12107 pci_dev_put(bridge);
12108 break;
12109 }
12110 }
12111 }
12112
Matt Carlson41588ba2008-04-19 18:12:33 -070012113 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12114 static struct tg3_dev_id {
12115 u32 vendor;
12116 u32 device;
12117 } bridge_chipsets[] = {
12118 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12119 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12120 { },
12121 };
12122 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12123 struct pci_dev *bridge = NULL;
12124
12125 while (pci_id->vendor != 0) {
12126 bridge = pci_get_device(pci_id->vendor,
12127 pci_id->device,
12128 bridge);
12129 if (!bridge) {
12130 pci_id++;
12131 continue;
12132 }
12133 if (bridge->subordinate &&
12134 (bridge->subordinate->number <=
12135 tp->pdev->bus->number) &&
12136 (bridge->subordinate->subordinate >=
12137 tp->pdev->bus->number)) {
12138 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12139 pci_dev_put(bridge);
12140 break;
12141 }
12142 }
12143 }
12144
Michael Chan4a29cc22006-03-19 13:21:12 -080012145 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12146 * DMA addresses > 40-bit. This bridge may have other additional
12147 * 57xx devices behind it in some 4-port NIC designs for example.
12148 * Any tg3 device found behind the bridge will also need the 40-bit
12149 * DMA workaround.
12150 */
Michael Chana4e2b342005-10-26 15:46:52 -070012151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12153 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012154 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012155 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012156 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012157 else {
12158 struct pci_dev *bridge = NULL;
12159
12160 do {
12161 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12162 PCI_DEVICE_ID_SERVERWORKS_EPB,
12163 bridge);
12164 if (bridge && bridge->subordinate &&
12165 (bridge->subordinate->number <=
12166 tp->pdev->bus->number) &&
12167 (bridge->subordinate->subordinate >=
12168 tp->pdev->bus->number)) {
12169 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12170 pci_dev_put(bridge);
12171 break;
12172 }
12173 } while (bridge);
12174 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012175
Linus Torvalds1da177e2005-04-16 15:20:36 -070012176 /* Initialize misc host control in PCI block. */
12177 tp->misc_host_ctrl |= (misc_ctrl_reg &
12178 MISC_HOST_CTRL_CHIPREV);
12179 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12180 tp->misc_host_ctrl);
12181
12182 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12183 &cacheline_sz_reg);
12184
12185 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12186 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12187 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12188 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12189
Michael Chan7544b092007-05-05 13:08:32 -070012190 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12191 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12192 tp->pdev_peer = tg3_find_peer(tp);
12193
John W. Linville2052da92005-04-21 16:56:08 -070012194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012197 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012198 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012200 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012201 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012202 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012203 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12204
John W. Linville1b440c562005-04-21 17:03:18 -070012205 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12206 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12207 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12208
Michael Chan5a6f3072006-03-20 22:28:05 -080012209 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012210 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12211 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12212 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12213 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12214 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12215 tp->pdev_peer == tp->pdev))
12216 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12217
Michael Chanaf36e6b2006-03-23 01:28:06 -080012218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012224 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012225 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012226 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012227 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012228 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12229 ASIC_REV_5750 &&
12230 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012231 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012232 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012233 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012234
Matt Carlsonf51f3562008-05-25 23:45:08 -070012235 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12236 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012237 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12238
Matt Carlson52f44902008-11-21 17:17:04 -080012239 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12240 &pci_state_reg);
12241
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012242 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12243 if (tp->pcie_cap != 0) {
12244 u16 lnkctl;
12245
Linus Torvalds1da177e2005-04-16 15:20:36 -070012246 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012247
12248 pcie_set_readrq(tp->pdev, 4096);
12249
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012250 pci_read_config_word(tp->pdev,
12251 tp->pcie_cap + PCI_EXP_LNKCTL,
12252 &lnkctl);
12253 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chanc7835a72006-11-15 21:14:42 -080012255 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12257 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12258 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
Michael Chanc7835a72006-11-15 21:14:42 -080012259 }
Matt Carlson52f44902008-11-21 17:17:04 -080012260 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlsonfcb389d2008-11-03 16:55:44 -080012261 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson52f44902008-11-21 17:17:04 -080012262 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12263 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12264 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12265 if (!tp->pcix_cap) {
12266 printk(KERN_ERR PFX "Cannot find PCI-X "
12267 "capability, aborting.\n");
12268 return -EIO;
12269 }
12270
12271 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12272 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012274
Michael Chan399de502005-10-03 14:02:39 -070012275 /* If we have an AMD 762 or VIA K8T800 chipset, write
12276 * reordering to the mailbox registers done by the host
12277 * controller can cause major troubles. We read back from
12278 * every mailbox register write to force the writes to be
12279 * posted to the chip in order.
12280 */
12281 if (pci_dev_present(write_reorder_chipsets) &&
12282 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12283 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12284
Linus Torvalds1da177e2005-04-16 15:20:36 -070012285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12286 tp->pci_lat_timer < 64) {
12287 tp->pci_lat_timer = 64;
12288
12289 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12290 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12291 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12292 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12293
12294 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12295 cacheline_sz_reg);
12296 }
12297
Matt Carlson52f44902008-11-21 17:17:04 -080012298 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12299 /* 5700 BX chips need to have their TX producer index
12300 * mailboxes written twice to workaround a bug.
12301 */
12302 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
Matt Carlson9974a352007-10-07 23:27:28 -070012303
Matt Carlson52f44902008-11-21 17:17:04 -080012304 /* If we are in PCI-X mode, enable register write workaround.
Linus Torvalds1da177e2005-04-16 15:20:36 -070012305 *
12306 * The workaround is to use indirect register accesses
12307 * for all chip writes not to mailbox registers.
12308 */
Matt Carlson52f44902008-11-21 17:17:04 -080012309 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012310 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012311
12312 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12313
12314			/* The chip can have its power management PCI config
12315 * space registers clobbered due to this bug.
12316 * So explicitly force the chip into D0 here.
12317 */
Matt Carlson9974a352007-10-07 23:27:28 -070012318 pci_read_config_dword(tp->pdev,
12319 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012320 &pm_reg);
12321 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12322 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012323 pci_write_config_dword(tp->pdev,
12324 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012325 pm_reg);
12326
12327 /* Also, force SERR#/PERR# in PCI command. */
12328 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12329 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12330 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12331 }
12332 }
12333
Linus Torvalds1da177e2005-04-16 15:20:36 -070012334 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12335 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12336 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12337 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12338
12339 /* Chip-specific fixup from Broadcom driver */
12340 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12341 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12342 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12343 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12344 }
12345
Michael Chan1ee582d2005-08-09 20:16:46 -070012346 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012347 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012348 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012349 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012350 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012351 tp->write32_tx_mbox = tg3_write32;
12352 tp->write32_rx_mbox = tg3_write32;
12353
12354 /* Various workaround register access methods */
12355 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12356 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012357 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12358 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12359 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12360 /*
12361	 * Back-to-back register writes can cause problems on these
12362	 * chips; the workaround is to read back all reg writes
12363	 * except those to mailbox regs.
12364 *
12365 * See tg3_write_indirect_reg32().
12366 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012367 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012368 }
12369
Michael Chan1ee582d2005-08-09 20:16:46 -070012370
12371 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12372 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12373 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12374 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12375 tp->write32_rx_mbox = tg3_write_flush_reg32;
12376 }
Michael Chan20094932005-08-09 20:16:32 -070012377
Michael Chan68929142005-08-09 20:17:14 -070012378 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12379 tp->read32 = tg3_read_indirect_reg32;
12380 tp->write32 = tg3_write_indirect_reg32;
12381 tp->read32_mbox = tg3_read_indirect_mbox;
12382 tp->write32_mbox = tg3_write_indirect_mbox;
12383 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12384 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12385
12386 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012387 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012388
12389 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12390 pci_cmd &= ~PCI_COMMAND_MEMORY;
12391 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12392 }
Michael Chanb5d37722006-09-27 16:06:21 -070012393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12394 tp->read32_mbox = tg3_read32_mbox_5906;
12395 tp->write32_mbox = tg3_write32_mbox_5906;
12396 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12397 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12398 }
Michael Chan68929142005-08-09 20:17:14 -070012399
Michael Chanbbadf502006-04-06 21:46:34 -070012400 if (tp->write32 == tg3_write_indirect_reg32 ||
12401 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12402 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012404 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12405
Michael Chan7d0c41e2005-04-21 17:06:20 -070012406 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012407 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012408 * determined before calling tg3_set_power_state() so that
12409 * we know whether or not to switch out of Vaux power.
12410 * When the flag is set, it means that GPIO1 is used for eeprom
12411 * write protect and also implies that it is a LOM where GPIOs
12412 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012413 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012414 tg3_get_eeprom_hw_cfg(tp);
12415
Matt Carlson0d3031d2007-10-10 18:02:43 -070012416 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12417 /* Allow reads and writes to the
12418 * APE register and memory space.
12419 */
12420 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12421 PCISTATE_ALLOW_APE_SHMEM_WR;
12422 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12423 pci_state_reg);
12424 }
12425
Matt Carlson9936bcf2007-10-10 18:03:07 -070012426 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012428 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012429 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12430
Michael Chan314fba32005-04-21 17:07:04 -070012431 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12432 * GPIO1 driven high will bring 5700's external PHY out of reset.
12433 * It is also used as eeprom write protect on LOMs.
12434 */
12435 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12436 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12437 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12438 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12439 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012440 /* Unused GPIO3 must be driven as output on 5752 because there
12441 * are no pull-up resistors on unused GPIO pins.
12442 */
12443 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12444 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012445
Michael Chanaf36e6b2006-03-23 01:28:06 -080012446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12447 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12448
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012449 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12450 /* Turn off the debug UART. */
12451 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12452 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12453 /* Keep VMain power. */
12454 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12455 GRC_LCLCTRL_GPIO_OUTPUT0;
12456 }
12457
Linus Torvalds1da177e2005-04-16 15:20:36 -070012458 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012459 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012460 if (err) {
12461 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12462 pci_name(tp->pdev));
12463 return err;
12464 }
12465
12466 /* 5700 B0 chips do not support checksumming correctly due
12467 * to hardware bugs.
12468 */
12469 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12470 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12471
Linus Torvalds1da177e2005-04-16 15:20:36 -070012472 /* Derive initial jumbo mode from MTU assigned in
12473 * ether_setup() via the alloc_etherdev() call
12474 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012475 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012476 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012477 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012478
12479 /* Determine WakeOnLan speed to use. */
12480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12481 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12482 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12483 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12484 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12485 } else {
12486 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12487 }
12488
12489	/* A few boards don't want the Ethernet@WireSpeed phy feature */
12490 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12491 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12492 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012493 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012494 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012495 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012496 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12497
12498 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12499 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12500 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12501 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12502 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12503
Michael Chanc424cb22006-04-29 18:56:34 -070012504 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012509 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12510 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12511 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012512 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12513 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012514 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12515 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012516 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12517 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012518
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12520 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12521 tp->phy_otp = tg3_read_otp_phycfg(tp);
12522 if (tp->phy_otp == 0)
12523 tp->phy_otp = TG3_OTP_DEFAULT;
12524 }
12525
Matt Carlsonf51f3562008-05-25 23:45:08 -070012526 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012527 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12528 else
12529 tp->mi_mode = MAC_MI_MODE_BASE;
12530
Linus Torvalds1da177e2005-04-16 15:20:36 -070012531 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012532 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12533 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12534 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12535
Matt Carlson57e69832008-05-25 23:48:31 -070012536 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12537 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12538
Matt Carlson158d7ab2008-05-29 01:37:54 -070012539 err = tg3_mdio_init(tp);
12540 if (err)
12541 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012542
12543 /* Initialize data/descriptor byte/word swapping. */
12544 val = tr32(GRC_MODE);
12545 val &= GRC_MODE_HOST_STACKUP;
12546 tw32(GRC_MODE, val | tp->grc_mode);
12547
12548 tg3_switch_clocks(tp);
12549
12550 /* Clear this out for sanity. */
12551 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12552
12553 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12554 &pci_state_reg);
12555 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12556 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12557 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12558
12559 if (chiprevid == CHIPREV_ID_5701_A0 ||
12560 chiprevid == CHIPREV_ID_5701_B0 ||
12561 chiprevid == CHIPREV_ID_5701_B2 ||
12562 chiprevid == CHIPREV_ID_5701_B5) {
12563 void __iomem *sram_base;
12564
12565 /* Write some dummy words into the SRAM status block
			 * area and see if they read back correctly.  If the
			 * read-back value is bad, force-enable the PCI-X
			 * workaround.
12568 */
12569 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12570
12571 writel(0x00000000, sram_base);
12572 writel(0x00000000, sram_base + 4);
12573 writel(0xffffffff, sram_base + 4);
12574 if (readl(sram_base) != 0x00000000)
12575 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12576 }
12577 }
12578
12579 udelay(50);
12580 tg3_nvram_init(tp);
12581
12582 grc_misc_cfg = tr32(GRC_MISC_CFG);
12583 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12584
Linus Torvalds1da177e2005-04-16 15:20:36 -070012585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12586 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12587 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12588 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12589
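	/* Tagged status mode stamps each status block update with a tag
	 * that the host echoes back through the interrupt mailbox; 5788
	 * and 5700 parts are excluded here, presumably because they do
	 * not support it reliably.
	 */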
David S. Millerfac9b832005-05-18 22:46:34 -070012590 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12591 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12592 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12593 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12594 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12595 HOSTCC_MODE_CLRTICK_TXBD);
12596
12597 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12598 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12599 tp->misc_host_ctrl);
12600 }
12601
Matt Carlson3bda1252008-08-15 14:08:22 -070012602 /* Preserve the APE MAC_MODE bits */
12603 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12604 tp->mac_mode = tr32(MAC_MODE) |
12605 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12606 else
12607 tp->mac_mode = TG3_DEF_MAC_MODE;
12608
Linus Torvalds1da177e2005-04-16 15:20:36 -070012609 /* these are limited to 10/100 only */
12610 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12611 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12612 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12613 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12614 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12615 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12616 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12617 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12618 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012619 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12620 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012622 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12623
12624 err = tg3_phy_probe(tp);
12625 if (err) {
12626 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12627 pci_name(tp->pdev), err);
12628 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012629 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012630 }
12631
12632 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012633 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012634
12635 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12636 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12637 } else {
12638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12639 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12640 else
12641 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12642 }
12643
12644 /* 5700 {AX,BX} chips have a broken status block link
12645 * change bit implementation, so we must use the
12646 * status register in those cases.
12647 */
12648 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12649 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12650 else
12651 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12652
	/* The led_ctrl is set during tg3_phy_probe; here we might
12654 * have to force the link status polling mechanism based
12655 * upon subsystem IDs.
12656 */
12657 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012658 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012659 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12660 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12661 TG3_FLAG_USE_LINKCHG_REG);
12662 }
12663
12664 /* For all SERDES we poll the MAC status register. */
12665 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12666 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12667 else
12668 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12669
Matt Carlsonad829262008-11-21 17:16:16 -080012670 tp->rx_offset = NET_IP_ALIGN;
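	/* A 5701 in PCI-X mode apparently cannot DMA into 2-byte-aligned
	 * receive buffers, so drop the IP alignment offset there.
	 */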
Linus Torvalds1da177e2005-04-16 15:20:36 -070012671 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12672 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12673 tp->rx_offset = 0;
12674
Michael Chanf92905d2006-06-29 20:14:29 -070012675 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12676
12677 /* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
12679 */
12680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12683 tp->rx_std_max_post = 8;
12684
Matt Carlson8ed5d972007-05-07 00:25:49 -070012685 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12686 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12687 PCIE_PWR_MGMT_L1_THRESH_MSK;
12688
Linus Torvalds1da177e2005-04-16 15:20:36 -070012689 return err;
12690}
12691
David S. Miller49b6e95f2007-03-29 01:38:42 -070012692#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012693static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12694{
12695 struct net_device *dev = tp->dev;
12696 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012697 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012698 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012699 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012700
David S. Miller49b6e95f2007-03-29 01:38:42 -070012701 addr = of_get_property(dp, "local-mac-address", &len);
12702 if (addr && len == 6) {
12703 memcpy(dev->dev_addr, addr, 6);
12704 memcpy(dev->perm_addr, dev->dev_addr, 6);
12705 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012706 }
12707 return -ENODEV;
12708}
12709
12710static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12711{
12712 struct net_device *dev = tp->dev;
12713
12714 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012715 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012716 return 0;
12717}
12718#endif
12719
12720static int __devinit tg3_get_device_address(struct tg3 *tp)
12721{
12722 struct net_device *dev = tp->dev;
12723 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012724 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012725
David S. Miller49b6e95f2007-03-29 01:38:42 -070012726#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012727 if (!tg3_get_macaddr_sparc(tp))
12728 return 0;
12729#endif
12730
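	/* Default NVRAM location of the MAC address; on 5704/5780-class
	 * parts the second MAC's address lives at 0xcc, and the 5906
	 * keeps it at 0x10 (see below).
	 */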
12731 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012732 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012733 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012734 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12735 mac_offset = 0xcc;
12736 if (tg3_nvram_lock(tp))
12737 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12738 else
12739 tg3_nvram_unlock(tp);
12740 }
Michael Chanb5d37722006-09-27 16:06:21 -070012741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12742 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012743
	/* First try to get it from the MAC address mailbox. */
12745 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
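	/* The upper 16 bits hold 0x484b (ASCII "HK"), which the boot
	 * firmware appears to use as a valid-address signature.
	 */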
12746 if ((hi >> 16) == 0x484b) {
12747 dev->dev_addr[0] = (hi >> 8) & 0xff;
12748 dev->dev_addr[1] = (hi >> 0) & 0xff;
12749
12750 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12751 dev->dev_addr[2] = (lo >> 24) & 0xff;
12752 dev->dev_addr[3] = (lo >> 16) & 0xff;
12753 dev->dev_addr[4] = (lo >> 8) & 0xff;
12754 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012755
Michael Chan008652b2006-03-27 23:14:53 -080012756 /* Some old bootcode may report a 0 MAC address in SRAM */
12757 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12758 }
12759 if (!addr_ok) {
12760 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012761 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012762 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12763 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12764 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12765 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12766 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12767 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12768 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12769 }
12770 /* Finally just fetch it out of the MAC control regs. */
12771 else {
12772 hi = tr32(MAC_ADDR_0_HIGH);
12773 lo = tr32(MAC_ADDR_0_LOW);
12774
12775 dev->dev_addr[5] = lo & 0xff;
12776 dev->dev_addr[4] = (lo >> 8) & 0xff;
12777 dev->dev_addr[3] = (lo >> 16) & 0xff;
12778 dev->dev_addr[2] = (lo >> 24) & 0xff;
12779 dev->dev_addr[1] = hi & 0xff;
12780 dev->dev_addr[0] = (hi >> 8) & 0xff;
12781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012782 }
12783
12784 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012785#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012786 if (!tg3_get_default_macaddr_sparc(tp))
12787 return 0;
12788#endif
12789 return -EINVAL;
12790 }
John W. Linville2ff43692005-09-12 14:44:20 -070012791 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012792 return 0;
12793}
12794
David S. Miller59e6b432005-05-18 22:50:10 -070012795#define BOUNDARY_SINGLE_CACHELINE 1
12796#define BOUNDARY_MULTI_CACHELINE 2
12797
12798static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12799{
12800 int cacheline_size;
12801 u8 byte;
12802 int goal;
12803
12804 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
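	/* A PCI cache line size of zero presumably means the register
	 * was never programmed; fall back to 1024 bytes.
	 */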
12805 if (byte == 0)
12806 cacheline_size = 1024;
12807 else
12808 cacheline_size = (int) byte * 4;
12809
12810 /* On 5703 and later chips, the boundary bits have no
12811 * effect.
12812 */
12813 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12814 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12815 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12816 goto out;
12817
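	/* Pick the DMA boundary goal by architecture (see the rationale
	 * below): multi-cache-line bursts for PPC64/IA64/PARISC,
	 * single-line for SPARC64/Alpha, and no restriction elsewhere.
	 */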
12818#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12819 goal = BOUNDARY_MULTI_CACHELINE;
12820#else
12821#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12822 goal = BOUNDARY_SINGLE_CACHELINE;
12823#else
12824 goal = 0;
12825#endif
12826#endif
12827
12828 if (!goal)
12829 goto out;
12830
12831 /* PCI controllers on most RISC systems tend to disconnect
12832 * when a device tries to burst across a cache-line boundary.
12833 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12834 *
12835 * Unfortunately, for PCI-E there are only limited
12836 * write-side controls for this, and thus for reads
12837 * we will still get the disconnects. We'll also waste
12838 * these PCI cycles for both read and write for chips
12839 * other than 5700 and 5701 which do not implement the
12840 * boundary bits.
12841 */
12842 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12843 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12844 switch (cacheline_size) {
12845 case 16:
12846 case 32:
12847 case 64:
12848 case 128:
12849 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12850 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12851 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12852 } else {
12853 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12854 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12855 }
12856 break;
12857
12858 case 256:
12859 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12860 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12861 break;
12862
12863 default:
12864 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12865 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12866 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012867 }
David S. Miller59e6b432005-05-18 22:50:10 -070012868 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12869 switch (cacheline_size) {
12870 case 16:
12871 case 32:
12872 case 64:
12873 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12874 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12875 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12876 break;
12877 }
12878 /* fallthrough */
12879 case 128:
12880 default:
12881 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12882 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12883 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012884 }
David S. Miller59e6b432005-05-18 22:50:10 -070012885 } else {
12886 switch (cacheline_size) {
12887 case 16:
12888 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12889 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12890 DMA_RWCTRL_WRITE_BNDRY_16);
12891 break;
12892 }
12893 /* fallthrough */
12894 case 32:
12895 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12896 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12897 DMA_RWCTRL_WRITE_BNDRY_32);
12898 break;
12899 }
12900 /* fallthrough */
12901 case 64:
12902 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12903 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12904 DMA_RWCTRL_WRITE_BNDRY_64);
12905 break;
12906 }
12907 /* fallthrough */
12908 case 128:
12909 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12910 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12911 DMA_RWCTRL_WRITE_BNDRY_128);
12912 break;
12913 }
12914 /* fallthrough */
12915 case 256:
12916 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12917 DMA_RWCTRL_WRITE_BNDRY_256);
12918 break;
12919 case 512:
12920 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12921 DMA_RWCTRL_WRITE_BNDRY_512);
12922 break;
12923 case 1024:
12924 default:
12925 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12926 DMA_RWCTRL_WRITE_BNDRY_1024);
12927 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012928 }
David S. Miller59e6b432005-05-18 22:50:10 -070012929 }
12930
12931out:
12932 return val;
12933}
12934
Linus Torvalds1da177e2005-04-16 15:20:36 -070012935static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12936{
12937 struct tg3_internal_buffer_desc test_desc;
12938 u32 sram_dma_descs;
12939 int i, ret;
12940
12941 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12942
12943 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12944 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12945 tw32(RDMAC_STATUS, 0);
12946 tw32(WDMAC_STATUS, 0);
12947
12948 tw32(BUFMGR_MODE, 0);
12949 tw32(FTQ_RESET, 0);
12950
12951 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12952 test_desc.addr_lo = buf_dma & 0xffffffff;
12953 test_desc.nic_mbuf = 0x00002100;
12954 test_desc.len = size;
12955
12956 /*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12958 * the *second* time the tg3 driver was getting loaded after an
12959 * initial scan.
12960 *
12961 * Broadcom tells me:
12962 * ...the DMA engine is connected to the GRC block and a DMA
12963 * reset may affect the GRC block in some unpredictable way...
12964 * The behavior of resets to individual blocks has not been tested.
12965 *
12966 * Broadcom noted the GRC reset will also reset all sub-components.
12967 */
12968 if (to_device) {
12969 test_desc.cqid_sqid = (13 << 8) | 2;
12970
12971 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12972 udelay(40);
12973 } else {
12974 test_desc.cqid_sqid = (16 << 8) | 7;
12975
12976 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12977 udelay(40);
12978 }
12979 test_desc.flags = 0x00000005;
12980
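	/* Copy the test descriptor into NIC SRAM one word at a time
	 * through the PCI memory window.
	 */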
12981 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12982 u32 val;
12983
12984 val = *(((u32 *)&test_desc) + i);
12985 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12986 sram_dma_descs + (i * sizeof(u32)));
12987 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12988 }
12989 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12990
12991 if (to_device) {
12992 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12993 } else {
12994 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12995 }
12996
12997 ret = -ENODEV;
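	/* Poll the completion FIFO for up to ~4 ms (40 x 100 usec) for
	 * the descriptor address to show up.
	 */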
12998 for (i = 0; i < 40; i++) {
12999 u32 val;
13000
13001 if (to_device)
13002 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13003 else
13004 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13005 if ((val & 0xffff) == sram_dma_descs) {
13006 ret = 0;
13007 break;
13008 }
13009
13010 udelay(100);
13011 }
13012
13013 return ret;
13014}
13015
David S. Millerded73402005-05-23 13:59:47 -070013016#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070013017
13018static int __devinit tg3_test_dma(struct tg3 *tp)
13019{
13020 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070013021 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013022 int ret;
13023
13024 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13025 if (!buf) {
13026 ret = -ENOMEM;
13027 goto out_nofree;
13028 }
13029
13030 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13031 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13032
David S. Miller59e6b432005-05-18 22:50:10 -070013033 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013034
13035 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13036 /* DMA read watermark not used on PCIE */
13037 tp->dma_rwctrl |= 0x00180000;
13038 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070013039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013041 tp->dma_rwctrl |= 0x003f0000;
13042 else
13043 tp->dma_rwctrl |= 0x003f000f;
13044 } else {
13045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13047 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080013048 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013049
Michael Chan4a29cc22006-03-19 13:21:12 -080013050 /* If the 5704 is behind the EPB bridge, we can
13051 * do the less restrictive ONE_DMA workaround for
13052 * better performance.
13053 */
13054 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13056 tp->dma_rwctrl |= 0x8000;
13057 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013058 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13059
Michael Chan49afdeb2007-02-13 12:17:03 -080013060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13061 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070013062 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080013063 tp->dma_rwctrl |=
13064 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13065 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13066 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070013067 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13068 /* 5780 always in PCIX mode */
13069 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070013070 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13071 /* 5714 always in PCIX mode */
13072 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013073 } else {
13074 tp->dma_rwctrl |= 0x001b000f;
13075 }
13076 }
13077
13078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13080 tp->dma_rwctrl &= 0xfffffff0;
13081
13082 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13084 /* Remove this if it causes problems for some boards. */
13085 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13086
13087 /* On 5700/5701 chips, we need to set this bit.
13088 * Otherwise the chip will issue cacheline transactions
13089 * to streamable DMA memory with not all the byte
13090 * enables turned on. This is an error on several
13091 * RISC PCI controllers, in particular sparc64.
13092 *
13093 * On 5703/5704 chips, this bit has been reassigned
13094 * a different meaning. In particular, it is used
13095 * on those chips to enable a PCI-X workaround.
13096 */
13097 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13098 }
13099
13100 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13101
13102#if 0
13103 /* Unneeded, already done by tg3_get_invariants. */
13104 tg3_switch_clocks(tp);
13105#endif
13106
13107 ret = 0;
13108 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13109 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13110 goto out;
13111
David S. Miller59e6b432005-05-18 22:50:10 -070013112 /* It is best to perform DMA test with maximum write burst size
13113 * to expose the 5700/5701 write DMA bug.
13114 */
13115 saved_dma_rwctrl = tp->dma_rwctrl;
13116 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13117 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13118
Linus Torvalds1da177e2005-04-16 15:20:36 -070013119 while (1) {
13120 u32 *p = buf, i;
13121
13122 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13123 p[i] = i;
13124
13125 /* Send the buffer to the chip. */
13126 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13127 if (ret) {
13128 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13129 break;
13130 }
13131
13132#if 0
13133 /* validate data reached card RAM correctly. */
13134 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13135 u32 val;
13136 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13137 if (le32_to_cpu(val) != p[i]) {
13138 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13139 /* ret = -ENODEV here? */
13140 }
13141 p[i] = 0;
13142 }
13143#endif
13144 /* Now read it back. */
13145 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13146 if (ret) {
13147 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13148
13149 break;
13150 }
13151
13152 /* Verify it. */
13153 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13154 if (p[i] == i)
13155 continue;
13156
David S. Miller59e6b432005-05-18 22:50:10 -070013157 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13158 DMA_RWCTRL_WRITE_BNDRY_16) {
13159 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013160 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13161 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13162 break;
13163 } else {
13164 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13165 ret = -ENODEV;
13166 goto out;
13167 }
13168 }
13169
13170 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13171 /* Success. */
13172 ret = 0;
13173 break;
13174 }
13175 }
David S. Miller59e6b432005-05-18 22:50:10 -070013176 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13177 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013178 static struct pci_device_id dma_wait_state_chipsets[] = {
13179 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13180 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13181 { },
13182 };
13183
		/* DMA test passed without adjusting DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013185 * now look for chipsets that are known to expose the
13186 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013187 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013188 if (pci_dev_present(dma_wait_state_chipsets)) {
13189 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13190 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}
13195
David S. Miller59e6b432005-05-18 22:50:10 -070013196 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13197 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013198
13199out:
13200 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13201out_nofree:
13202 return ret;
13203}
13204
13205static void __devinit tg3_init_link_config(struct tg3 *tp)
13206{
13207 tp->link_config.advertising =
13208 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13209 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13210 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13211 ADVERTISED_Autoneg | ADVERTISED_MII);
13212 tp->link_config.speed = SPEED_INVALID;
13213 tp->link_config.duplex = DUPLEX_INVALID;
13214 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013215 tp->link_config.active_speed = SPEED_INVALID;
13216 tp->link_config.active_duplex = DUPLEX_INVALID;
13217 tp->link_config.phy_is_low_power = 0;
13218 tp->link_config.orig_speed = SPEED_INVALID;
13219 tp->link_config.orig_duplex = DUPLEX_INVALID;
13220 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13221}
13222
13223static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13224{
Michael Chanfdfec172005-07-25 12:31:48 -070013225 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13226 tp->bufmgr_config.mbuf_read_dma_low_water =
13227 DEFAULT_MB_RDMA_LOW_WATER_5705;
13228 tp->bufmgr_config.mbuf_mac_rx_low_water =
13229 DEFAULT_MB_MACRX_LOW_WATER_5705;
13230 tp->bufmgr_config.mbuf_high_water =
13231 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13233 tp->bufmgr_config.mbuf_mac_rx_low_water =
13234 DEFAULT_MB_MACRX_LOW_WATER_5906;
13235 tp->bufmgr_config.mbuf_high_water =
13236 DEFAULT_MB_HIGH_WATER_5906;
13237 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013238
Michael Chanfdfec172005-07-25 12:31:48 -070013239 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13240 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13241 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13242 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13243 tp->bufmgr_config.mbuf_high_water_jumbo =
13244 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13245 } else {
13246 tp->bufmgr_config.mbuf_read_dma_low_water =
13247 DEFAULT_MB_RDMA_LOW_WATER;
13248 tp->bufmgr_config.mbuf_mac_rx_low_water =
13249 DEFAULT_MB_MACRX_LOW_WATER;
13250 tp->bufmgr_config.mbuf_high_water =
13251 DEFAULT_MB_HIGH_WATER;
13252
13253 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13254 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13255 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13256 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13257 tp->bufmgr_config.mbuf_high_water_jumbo =
13258 DEFAULT_MB_HIGH_WATER_JUMBO;
13259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013260
13261 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13262 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13263}
13264
13265static char * __devinit tg3_phy_string(struct tg3 *tp)
13266{
13267 switch (tp->phy_id & PHY_ID_MASK) {
13268 case PHY_ID_BCM5400: return "5400";
13269 case PHY_ID_BCM5401: return "5401";
13270 case PHY_ID_BCM5411: return "5411";
13271 case PHY_ID_BCM5701: return "5701";
13272 case PHY_ID_BCM5703: return "5703";
13273 case PHY_ID_BCM5704: return "5704";
13274 case PHY_ID_BCM5705: return "5705";
13275 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013276 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013277 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013278 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013279 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013280 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013281 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013282 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013283 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013284 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013285 case PHY_ID_BCM8002: return "8002/serdes";
13286 case 0: return "serdes";
13287 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013289}
13290
Michael Chanf9804dd2005-09-27 12:13:10 -070013291static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13292{
13293 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13294 strcpy(str, "PCI Express");
13295 return str;
13296 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13297 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13298
13299 strcpy(str, "PCIX:");
13300
13301 if ((clock_ctrl == 7) ||
13302 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13303 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13304 strcat(str, "133MHz");
13305 else if (clock_ctrl == 0)
13306 strcat(str, "33MHz");
13307 else if (clock_ctrl == 2)
13308 strcat(str, "50MHz");
13309 else if (clock_ctrl == 4)
13310 strcat(str, "66MHz");
13311 else if (clock_ctrl == 6)
13312 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013313 } else {
13314 strcpy(str, "PCI:");
13315 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13316 strcat(str, "66MHz");
13317 else
13318 strcat(str, "33MHz");
13319 }
13320 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13321 strcat(str, ":32-bit");
13322 else
13323 strcat(str, ":64-bit");
13324 return str;
13325}
13326
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013327static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013328{
13329 struct pci_dev *peer;
13330 unsigned int func, devnr = tp->pdev->devfn & ~7;
13331
13332 for (func = 0; func < 8; func++) {
13333 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13334 if (peer && peer != tp->pdev)
13335 break;
13336 pci_dev_put(peer);
13337 }
	/* 5704 can be configured in single-port mode; set peer to
13339 * tp->pdev in that case.
13340 */
13341 if (!peer) {
13342 peer = tp->pdev;
13343 return peer;
13344 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013345
13346 /*
13347 * We don't need to keep the refcount elevated; there's no way
13348 * to remove one half of this device without removing the other
13349 */
13350 pci_dev_put(peer);
13351
13352 return peer;
13353}
13354
David S. Miller15f98502005-05-18 22:49:26 -070013355static void __devinit tg3_init_coal(struct tg3 *tp)
13356{
13357 struct ethtool_coalesce *ec = &tp->coal;
13358
13359 memset(ec, 0, sizeof(*ec));
13360 ec->cmd = ETHTOOL_GCOALESCE;
13361 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13362 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13363 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13364 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13365 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13366 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13367 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13368 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13369 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13370
13371 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13372 HOSTCC_MODE_CLRTICK_TXBD)) {
13373 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13374 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13375 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13376 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13377 }
Michael Chand244c892005-07-05 14:42:33 -070013378
13379 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13380 ec->rx_coalesce_usecs_irq = 0;
13381 ec->tx_coalesce_usecs_irq = 0;
13382 ec->stats_block_coalesce_usecs = 0;
13383 }
David S. Miller15f98502005-05-18 22:49:26 -070013384}
13385
Stephen Hemminger7c7d64b2008-11-19 22:25:36 -080013386static const struct net_device_ops tg3_netdev_ops = {
13387 .ndo_open = tg3_open,
13388 .ndo_stop = tg3_close,
Stephen Hemminger00829822008-11-20 20:14:53 -080013389 .ndo_start_xmit = tg3_start_xmit,
13390 .ndo_get_stats = tg3_get_stats,
13391 .ndo_validate_addr = eth_validate_addr,
13392 .ndo_set_multicast_list = tg3_set_rx_mode,
13393 .ndo_set_mac_address = tg3_set_mac_addr,
13394 .ndo_do_ioctl = tg3_ioctl,
13395 .ndo_tx_timeout = tg3_tx_timeout,
13396 .ndo_change_mtu = tg3_change_mtu,
13397#if TG3_VLAN_TAG_USED
13398 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13399#endif
13400#ifdef CONFIG_NET_POLL_CONTROLLER
13401 .ndo_poll_controller = tg3_poll_controller,
13402#endif
13403};
13404
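/* Identical to tg3_netdev_ops except for the transmit handler. */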
13405static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13406 .ndo_open = tg3_open,
13407 .ndo_stop = tg3_close,
13408 .ndo_start_xmit = tg3_start_xmit_dma_bug,
Stephen Hemminger7c7d64b2008-11-19 22:25:36 -080013409 .ndo_get_stats = tg3_get_stats,
13410 .ndo_validate_addr = eth_validate_addr,
13411 .ndo_set_multicast_list = tg3_set_rx_mode,
13412 .ndo_set_mac_address = tg3_set_mac_addr,
13413 .ndo_do_ioctl = tg3_ioctl,
13414 .ndo_tx_timeout = tg3_tx_timeout,
13415 .ndo_change_mtu = tg3_change_mtu,
13416#if TG3_VLAN_TAG_USED
13417 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13418#endif
13419#ifdef CONFIG_NET_POLL_CONTROLLER
13420 .ndo_poll_controller = tg3_poll_controller,
13421#endif
13422};
13423
Linus Torvalds1da177e2005-04-16 15:20:36 -070013424static int __devinit tg3_init_one(struct pci_dev *pdev,
13425 const struct pci_device_id *ent)
13426{
13427 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013428 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013429 struct net_device *dev;
13430 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013431 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013432 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013433 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013434
13435 if (tg3_version_printed++ == 0)
13436 printk(KERN_INFO "%s", version);
13437
13438 err = pci_enable_device(pdev);
13439 if (err) {
13440 printk(KERN_ERR PFX "Cannot enable PCI device, "
13441 "aborting.\n");
13442 return err;
13443 }
13444
Matt Carlson63532392008-11-03 16:49:57 -080013445 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013446 printk(KERN_ERR PFX "Cannot find proper PCI device "
13447 "base address, aborting.\n");
13448 err = -ENODEV;
13449 goto err_out_disable_pdev;
13450 }
13451
13452 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13453 if (err) {
13454 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13455 "aborting.\n");
13456 goto err_out_disable_pdev;
13457 }
13458
13459 pci_set_master(pdev);
13460
13461 /* Find power-management capability. */
13462 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13463 if (pm_cap == 0) {
13464 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13465 "aborting.\n");
13466 err = -EIO;
13467 goto err_out_free_res;
13468 }
13469
Linus Torvalds1da177e2005-04-16 15:20:36 -070013470 dev = alloc_etherdev(sizeof(*tp));
13471 if (!dev) {
13472 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13473 err = -ENOMEM;
13474 goto err_out_free_res;
13475 }
13476
Linus Torvalds1da177e2005-04-16 15:20:36 -070013477 SET_NETDEV_DEV(dev, &pdev->dev);
13478
Linus Torvalds1da177e2005-04-16 15:20:36 -070013479#if TG3_VLAN_TAG_USED
13480 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013481#endif
13482
13483 tp = netdev_priv(dev);
13484 tp->pdev = pdev;
13485 tp->dev = dev;
13486 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013487 tp->rx_mode = TG3_DEF_RX_MODE;
13488 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013489
Linus Torvalds1da177e2005-04-16 15:20:36 -070013490 if (tg3_debug > 0)
13491 tp->msg_enable = tg3_debug;
13492 else
13493 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13494
13495 /* The word/byte swap controls here control register access byte
13496 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13497 * setting below.
13498 */
13499 tp->misc_host_ctrl =
13500 MISC_HOST_CTRL_MASK_PCI_INT |
13501 MISC_HOST_CTRL_WORD_SWAP |
13502 MISC_HOST_CTRL_INDIR_ACCESS |
13503 MISC_HOST_CTRL_PCISTATE_RW;
13504
13505 /* The NONFRM (non-frame) byte/word swap controls take effect
13506 * on descriptor entries, anything which isn't packet data.
13507 *
13508 * The StrongARM chips on the board (one for tx, one for rx)
13509 * are running in big-endian mode.
13510 */
13511 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13512 GRC_MODE_WSWAP_NONFRM_DATA);
13513#ifdef __BIG_ENDIAN
13514 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13515#endif
13516 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013517 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013518 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013519
Matt Carlson63532392008-11-03 16:49:57 -080013520 dev->mem_start = pci_resource_start(pdev, BAR_0);
13521 tg3reg_len = pci_resource_len(pdev, BAR_0);
13522 dev->mem_end = dev->mem_start + tg3reg_len;
13523
13524 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013525 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013526 printk(KERN_ERR PFX "Cannot map device registers, "
13527 "aborting.\n");
13528 err = -ENOMEM;
13529 goto err_out_free_dev;
13530 }
13531
13532 tg3_init_link_config(tp);
13533
Linus Torvalds1da177e2005-04-16 15:20:36 -070013534 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13535 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13536 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13537
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013538 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013539 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013540 dev->watchdog_timeo = TG3_TX_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013541 dev->irq = pdev->irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013542
13543 err = tg3_get_invariants(tp);
13544 if (err) {
13545 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13546 "aborting.\n");
13547 goto err_out_iounmap;
13548 }
13549
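	/* Newer chips use the plain transmit path; older ones get
	 * tg3_start_xmit_dma_bug, which appears to carry extra DMA
	 * workarounds in the transmit handler.
	 */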
Stephen Hemminger00829822008-11-20 20:14:53 -080013550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13556 dev->netdev_ops = &tg3_netdev_ops;
13557 else
13558 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13559
13560
Michael Chan4a29cc22006-03-19 13:21:12 -080013561 /* The EPB bridge inside 5714, 5715, and 5780 and any
13562 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013563 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13564 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13565 * do DMA address check in tg3_start_xmit().
13566 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013567 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13568 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13569 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013570 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13571#ifdef CONFIG_HIGHMEM
13572 dma_mask = DMA_64BIT_MASK;
13573#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013574 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013575 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13576
13577 /* Configure DMA attributes. */
13578 if (dma_mask > DMA_32BIT_MASK) {
13579 err = pci_set_dma_mask(pdev, dma_mask);
13580 if (!err) {
13581 dev->features |= NETIF_F_HIGHDMA;
13582 err = pci_set_consistent_dma_mask(pdev,
13583 persist_dma_mask);
13584 if (err < 0) {
13585 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13586 "DMA for consistent allocations\n");
13587 goto err_out_iounmap;
13588 }
13589 }
13590 }
13591 if (err || dma_mask == DMA_32BIT_MASK) {
13592 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13593 if (err) {
13594 printk(KERN_ERR PFX "No usable DMA configuration, "
13595 "aborting.\n");
13596 goto err_out_iounmap;
13597 }
13598 }
13599
Michael Chanfdfec172005-07-25 12:31:48 -070013600 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013601
Linus Torvalds1da177e2005-04-16 15:20:36 -070013602 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13603 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13607 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013608 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013609 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13610 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13611 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013612 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013613 }
13614
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013615 /* TSO is on by default on chips that support hardware TSO.
13616 * Firmware TSO on older chips gives lower performance, so it
13617 * is off by default, but can be enabled using ethtool.
13618 */
Michael Chanb0026622006-07-03 19:42:14 -070013619 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013620 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013621 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13622 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013623 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13625 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13626 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013628 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013629 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013630
Linus Torvalds1da177e2005-04-16 15:20:36 -070013631
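	/* A 5705 A1 that is not TSO-capable and sits on a low-speed bus
	 * apparently cannot handle a full standard RX ring; cap it at 64
	 * pending buffers.
	 */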
13632 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13633 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13634 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13635 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13636 tp->rx_pending = 63;
13637 }
13638
Linus Torvalds1da177e2005-04-16 15:20:36 -070013639 err = tg3_get_device_address(tp);
13640 if (err) {
13641 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13642 "aborting.\n");
13643 goto err_out_iounmap;
13644 }
13645
Matt Carlson0d3031d2007-10-10 18:02:43 -070013646 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013647 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013648 printk(KERN_ERR PFX "Cannot find proper PCI device "
13649 "base address for APE, aborting.\n");
13650 err = -ENODEV;
13651 goto err_out_iounmap;
13652 }
13653
Matt Carlson63532392008-11-03 16:49:57 -080013654 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013655 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013656 printk(KERN_ERR PFX "Cannot map APE registers, "
13657 "aborting.\n");
13658 err = -ENOMEM;
13659 goto err_out_iounmap;
13660 }
13661
13662 tg3_ape_lock_init(tp);
13663 }
13664
Matt Carlsonc88864d2007-11-12 21:07:01 -080013665 /*
	 * Reset the chip in case a UNDI or EFI driver did not shut it
	 * down cleanly; otherwise the DMA self test will enable WDMAC
	 * and we'll see (spurious) pending DMA on the PCI bus at that
	 * point.
13669 */
13670 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13671 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13672 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13673 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13674 }
13675
13676 err = tg3_test_dma(tp);
13677 if (err) {
13678 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13679 goto err_out_apeunmap;
13680 }
13681
	/* Tigon3 can do IPv4 only... and some chips have buggy
13683 * checksumming.
13684 */
13685 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13686 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013690 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013692 dev->features |= NETIF_F_IPV6_CSUM;
13693
13694 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13695 } else
13696 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13697
13698 /* flow control autonegotiation is default behavior */
13699 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013700 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013701
13702 tg3_init_coal(tp);
13703
Michael Chanc49a1562006-12-17 17:07:29 -080013704 pci_set_drvdata(pdev, dev);
13705
Linus Torvalds1da177e2005-04-16 15:20:36 -070013706 err = register_netdev(dev);
13707 if (err) {
13708 printk(KERN_ERR PFX "Cannot register net device, "
13709 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013710 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013711 }
13712
Matt Carlsondf59c942008-11-03 16:52:56 -080013713 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013714 dev->name,
13715 tp->board_part_number,
13716 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013717 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013718 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013719
Matt Carlsondf59c942008-11-03 16:52:56 -080013720 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13721 printk(KERN_INFO
13722 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13723 tp->dev->name,
13724 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
Kay Sieversfb28ad32008-11-10 13:55:14 -080013725 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
Matt Carlsondf59c942008-11-03 16:52:56 -080013726 else
13727 printk(KERN_INFO
13728 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13729 tp->dev->name, tg3_phy_string(tp),
13730 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13731 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13732 "10/100/1000Base-T")),
13733 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13734
13735 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013736 dev->name,
13737 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13738 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13739 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13740 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013741 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013742 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13743 dev->name, tp->dma_rwctrl,
13744 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13745 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013746
13747 return 0;
13748
Matt Carlson0d3031d2007-10-10 18:02:43 -070013749err_out_apeunmap:
13750 if (tp->aperegs) {
13751 iounmap(tp->aperegs);
13752 tp->aperegs = NULL;
13753 }
13754
Linus Torvalds1da177e2005-04-16 15:20:36 -070013755err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013756 if (tp->regs) {
13757 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013758 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013759 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013760
13761err_out_free_dev:
13762 free_netdev(dev);
13763
13764err_out_free_res:
13765 pci_release_regions(pdev);
13766
13767err_out_disable_pdev:
13768 pci_disable_device(pdev);
13769 pci_set_drvdata(pdev, NULL);
13770 return err;
13771}
13772
13773static void __devexit tg3_remove_one(struct pci_dev *pdev)
13774{
13775 struct net_device *dev = pci_get_drvdata(pdev);
13776
13777 if (dev) {
13778 struct tg3 *tp = netdev_priv(dev);
13779
Michael Chan7faa0062006-02-02 17:29:28 -080013780 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013781
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013782 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13783 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013784 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013785 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013786
Linus Torvalds1da177e2005-04-16 15:20:36 -070013787 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013788 if (tp->aperegs) {
13789 iounmap(tp->aperegs);
13790 tp->aperegs = NULL;
13791 }
Michael Chan68929142005-08-09 20:17:14 -070013792 if (tp->regs) {
13793 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013794 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013795 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013796 free_netdev(dev);
13797 pci_release_regions(pdev);
13798 pci_disable_device(pdev);
13799 pci_set_drvdata(pdev, NULL);
13800 }
13801}
13802
13803static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13804{
13805 struct net_device *dev = pci_get_drvdata(pdev);
13806 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013807 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013808 int err;
13809
Michael Chan3e0c95f2007-08-03 20:56:54 -070013810 /* PCI register 4 needs to be saved whether netif_running() or not.
13811 * MSI address and data need to be saved if using MSI and
13812 * netif_running().
13813 */
13814 pci_save_state(pdev);
13815
Linus Torvalds1da177e2005-04-16 15:20:36 -070013816 if (!netif_running(dev))
13817 return 0;
13818
Michael Chan7faa0062006-02-02 17:29:28 -080013819 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013820 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013821 tg3_netif_stop(tp);
13822
13823 del_timer_sync(&tp->timer);
13824
David S. Millerf47c11e2005-06-24 20:18:35 -070013825 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013826 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013827 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013828
13829 netif_device_detach(dev);
13830
David S. Millerf47c11e2005-06-24 20:18:35 -070013831 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013832 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013833 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013834 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013835
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013836 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13837
13838 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013839 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013840 int err2;
13841
David S. Millerf47c11e2005-06-24 20:18:35 -070013842 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013843
Michael Chan6a9eba12005-12-13 21:08:58 -080013844 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013845 err2 = tg3_restart_hw(tp, 1);
13846 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013847 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013848
13849 tp->timer.expires = jiffies + tp->timer_offset;
13850 add_timer(&tp->timer);
13851
13852 netif_device_attach(dev);
13853 tg3_netif_start(tp);
13854
Michael Chanb9ec6c12006-07-25 16:37:27 -070013855out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013856 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013857
13858 if (!err2)
13859 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013860 }
13861
13862 return err;
13863}
13864
13865static int tg3_resume(struct pci_dev *pdev)
13866{
13867 struct net_device *dev = pci_get_drvdata(pdev);
13868 struct tg3 *tp = netdev_priv(dev);
13869 int err;
13870
Michael Chan3e0c95f2007-08-03 20:56:54 -070013871 pci_restore_state(tp->pdev);
13872
Linus Torvalds1da177e2005-04-16 15:20:36 -070013873 if (!netif_running(dev))
13874 return 0;
13875
Michael Chanbc1c7562006-03-20 17:48:03 -080013876 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013877 if (err)
13878 return err;
13879
13880 netif_device_attach(dev);
13881
David S. Millerf47c11e2005-06-24 20:18:35 -070013882 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013883
Michael Chan6a9eba12005-12-13 21:08:58 -080013884 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013885 err = tg3_restart_hw(tp, 1);
13886 if (err)
13887 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013888
13889 tp->timer.expires = jiffies + tp->timer_offset;
13890 add_timer(&tp->timer);
13891
Linus Torvalds1da177e2005-04-16 15:20:36 -070013892 tg3_netif_start(tp);
13893
Michael Chanb9ec6c12006-07-25 16:37:27 -070013894out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013895 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013896
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013897 if (!err)
13898 tg3_phy_start(tp);
13899
Michael Chanb9ec6c12006-07-25 16:37:27 -070013900 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013901}
13902
13903static struct pci_driver tg3_driver = {
13904 .name = DRV_MODULE_NAME,
13905 .id_table = tg3_pci_tbl,
13906 .probe = tg3_init_one,
13907 .remove = __devexit_p(tg3_remove_one),
13908 .suspend = tg3_suspend,
13909 .resume = tg3_resume
13910};
13911
13912static int __init tg3_init(void)
13913{
Jeff Garzik29917622006-08-19 17:48:59 -040013914 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013915}
13916
13917static void __exit tg3_cleanup(void)
13918{
13919 pci_unregister_driver(&tg3_driver);
13920}
13921
13922module_init(tg3_init);
13923module_exit(tg3_cleanup);