/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.95"
#define DRV_MODULE_RELDATE	"November 3, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

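/* Mailbox writes in indirect mode.  The receive return ring and standard
 * receive producer mailboxes have dedicated PCI config space aliases; all
 * other mailboxes are reached through the indirect register window.
 */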
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

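/* Access NIC on-board SRAM through the memory window, either with PCI
 * config space cycles or with memory-mapped writes depending on the chip.
 */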
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

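/* The APE (Application Processing Engine) lock registers act as hardware
 * semaphores shared between the driver and the management firmware.
 */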
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

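/* Reprogram the core clock control register, stepping through the
 * intermediate ALTCLK setting where the hardware requires it before
 * writing the final value.
 */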
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

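/* Read and write PHY registers through the MAC's MI (MDIO) interface,
 * polling MAC_MI_COM until the transaction completes.
 */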
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

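/* phylib MDIO bus accessors.  These wrap tg3_readphy()/tg3_writephy()
 * and return -EAGAIN while the bus is paused around a chip reset.
 */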
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

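/* Match the 5785 MAC's PHY configuration registers (LED modes and RGMII
 * in-band status signalling) to the type of external PHY that was found.
 */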
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~MAC_PHYCFG1_RGMII_INT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

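/* Allocate and register an MDIO bus with phylib when the device is driven
 * through an external PHY (TG3_FLG3_USE_PHYLIB).
 */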
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

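/* Report the current MII register state to the management firmware
 * through the shared memory command mailbox.
 */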
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

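/* Translate TG3_FLOW_CTRL_TX/RX flags into MII pause advertisement bits,
 * first for copper (1000BASE-T) and then for 1000BASE-X links.
 */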
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

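/* Resolve the flow control mode that autonegotiation arrived at from the
 * local and link partner pause advertisements.
 */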
static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

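/* Apply the negotiated (or administratively forced) flow control settings
 * to the MAC receive and transmit mode registers.
 */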
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

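/* phylib link-change callback: bring the MAC mode, flow control and TX
 * timing registers in line with the state reported by the PHY, and log
 * the change if anything of interest moved.
 */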
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

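/* Connect the MAC to its PHY through phylib and mask the PHY's supported
 * features down to what this MAC can actually do.
 */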
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		phydev->supported &= (PHY_GBIT_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

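/* Enable or disable the PHY's auto power-down (APD) feature through the
 * miscellaneous shadow register set.
 */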
Matt Carlson6833c042008-11-21 17:18:59 -08001477static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1478{
1479 u32 reg;
1480
1481 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1482 return;
1483
1484 reg = MII_TG3_MISC_SHDW_WREN |
1485 MII_TG3_MISC_SHDW_SCR5_SEL |
1486 MII_TG3_MISC_SHDW_SCR5_LPED |
1487 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1488 MII_TG3_MISC_SHDW_SCR5_SDTL |
1489 MII_TG3_MISC_SHDW_SCR5_C125OE;
1490 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1491 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1492
1493 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1494
1495
1496 reg = MII_TG3_MISC_SHDW_WREN |
1497 MII_TG3_MISC_SHDW_APD_SEL |
1498 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1499 if (enable)
1500 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1501
1502 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1503}
1504
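/* Enable or disable automatic MDI crossover on copper 5705+ devices:
 * the 5906 uses its EPHY test/shadow registers, other chips use the
 * auxiliary control register.  Serdes devices are left untouched.
 */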
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001505static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1506{
1507 u32 phy;
1508
1509 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1510 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1511 return;
1512
1513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1514 u32 ephy;
1515
1516 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1517 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1518 ephy | MII_TG3_EPHY_SHADOW_EN);
1519 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1520 if (enable)
1521 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1522 else
1523 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1524 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1525 }
1526 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1527 }
1528 } else {
1529 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1530 MII_TG3_AUXCTL_SHDWSEL_MISC;
1531 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1532 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1533 if (enable)
1534 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1535 else
1536 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1537 phy |= MII_TG3_AUXCTL_MISC_WREN;
1538 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1539 }
1540 }
1541}
1542
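/* Turn on the PHY's ethernet@wirespeed feature (auxiliary control
 * register bits 4 and 15) unless it has been disabled for this board.
 */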
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543static void tg3_phy_set_wirespeed(struct tg3 *tp)
1544{
1545 u32 val;
1546
1547 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1548 return;
1549
1550 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1551 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1552 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1553 (val | (1 << 15) | (1 << 4)));
1554}
1555
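/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * data saved in tp->phy_otp: enable the SM_DSP clock, write the TAP1,
 * AADJ and EXP tap values, then turn the SM_DSP clock back off.
 */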
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001556static void tg3_phy_apply_otp(struct tg3 *tp)
1557{
1558 u32 otp, phy;
1559
1560 if (!tp->phy_otp)
1561 return;
1562
1563 otp = tp->phy_otp;
1564
1565 /* Enable SM_DSP clock and tx 6dB coding. */
1566 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1567 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1568 MII_TG3_AUXCTL_ACTL_TX_6DB;
1569 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1570
1571 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1572 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1573 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1574
1575 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1576 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1577 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1578
1579 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1580 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1581 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1582
1583 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1584 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1585
1586 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1587 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1588
1589 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1590 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1591 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1592
1593 /* Turn off SM_DSP clock. */
1594 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1595 MII_TG3_AUXCTL_ACTL_TX_6DB;
1596 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1597}
1598
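/* Poll PHY register 0x16 until the DSP macro busy bit (0x1000) clears;
 * give up after 100 reads and return -EBUSY.
 */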
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599static int tg3_wait_macro_done(struct tg3 *tp)
1600{
1601 int limit = 100;
1602
1603 while (limit--) {
1604 u32 tmp32;
1605
1606 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1607 if ((tmp32 & 0x1000) == 0)
1608 break;
1609 }
1610 }
 1611	if (limit < 0)
1612 return -EBUSY;
1613
1614 return 0;
1615}
1616
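/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back.  A macro timeout requests another PHY reset via
 * *resetp; either a timeout or a pattern mismatch returns -EBUSY.
 */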
1617static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1618{
1619 static const u32 test_pat[4][6] = {
1620 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1621 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1622 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1623 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1624 };
1625 int chan;
1626
1627 for (chan = 0; chan < 4; chan++) {
1628 int i;
1629
1630 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1631 (chan * 0x2000) | 0x0200);
1632 tg3_writephy(tp, 0x16, 0x0002);
1633
1634 for (i = 0; i < 6; i++)
1635 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1636 test_pat[chan][i]);
1637
1638 tg3_writephy(tp, 0x16, 0x0202);
1639 if (tg3_wait_macro_done(tp)) {
1640 *resetp = 1;
1641 return -EBUSY;
1642 }
1643
1644 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1645 (chan * 0x2000) | 0x0200);
1646 tg3_writephy(tp, 0x16, 0x0082);
1647 if (tg3_wait_macro_done(tp)) {
1648 *resetp = 1;
1649 return -EBUSY;
1650 }
1651
1652 tg3_writephy(tp, 0x16, 0x0802);
1653 if (tg3_wait_macro_done(tp)) {
1654 *resetp = 1;
1655 return -EBUSY;
1656 }
1657
1658 for (i = 0; i < 6; i += 2) {
1659 u32 low, high;
1660
1661 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1662 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1663 tg3_wait_macro_done(tp)) {
1664 *resetp = 1;
1665 return -EBUSY;
1666 }
1667 low &= 0x7fff;
1668 high &= 0x000f;
1669 if (low != test_pat[chan][i] ||
1670 high != test_pat[chan][i+1]) {
1671 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1672 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1673 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1674
1675 return -EBUSY;
1676 }
1677 }
1678 }
1679
1680 return 0;
1681}
1682
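/* Clear the test pattern from all four PHY DSP channels. */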
1683static int tg3_phy_reset_chanpat(struct tg3 *tp)
1684{
1685 int chan;
1686
1687 for (chan = 0; chan < 4; chan++) {
1688 int i;
1689
1690 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1691 (chan * 0x2000) | 0x0200);
1692 tg3_writephy(tp, 0x16, 0x0002);
1693 for (i = 0; i < 6; i++)
1694 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1695 tg3_writephy(tp, 0x16, 0x0202);
1696 if (tg3_wait_macro_done(tp))
1697 return -EBUSY;
1698 }
1699
1700 return 0;
1701}
1702
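/* PHY reset workaround for 5703/5704/5705: retry a BMCR reset and DSP
 * test-pattern check up to ten times, then clear the test channels and
 * restore the 1000BASE-T control and extended control registers.
 */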
1703static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1704{
1705 u32 reg32, phy9_orig;
1706 int retries, do_phy_reset, err;
1707
1708 retries = 10;
1709 do_phy_reset = 1;
1710 do {
1711 if (do_phy_reset) {
1712 err = tg3_bmcr_reset(tp);
1713 if (err)
1714 return err;
1715 do_phy_reset = 0;
1716 }
1717
1718 /* Disable transmitter and interrupt. */
1719 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1720 continue;
1721
1722 reg32 |= 0x3000;
1723 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1724
 1725		/* Set full-duplex, 1000 Mbps. */
1726 tg3_writephy(tp, MII_BMCR,
1727 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1728
1729 /* Set to master mode. */
1730 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1731 continue;
1732
1733 tg3_writephy(tp, MII_TG3_CTRL,
1734 (MII_TG3_CTRL_AS_MASTER |
1735 MII_TG3_CTRL_ENABLE_AS_MASTER));
1736
1737 /* Enable SM_DSP_CLOCK and 6dB. */
1738 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1739
1740 /* Block the PHY control access. */
1741 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1742 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1743
1744 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1745 if (!err)
1746 break;
1747 } while (--retries);
1748
1749 err = tg3_phy_reset_chanpat(tp);
1750 if (err)
1751 return err;
1752
1753 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1754 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1755
1756 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1757 tg3_writephy(tp, 0x16, 0x0000);
1758
1759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1761 /* Set Extended packet length bit for jumbo frames */
1762 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1763 }
1764 else {
1765 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1766 }
1767
1768 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1769
1770 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1771 reg32 &= ~0x3000;
1772 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1773 } else if (!err)
1774 err = -EBUSY;
1775
1776 return err;
1777}
1778
 1779/* This will reset the tigon3 PHY and apply the chip-specific
 1780 * DSP and errata workarounds that are needed after the reset.
 1781 */
1782static int tg3_phy_reset(struct tg3 *tp)
1783{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001784 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 u32 phy_status;
1786 int err;
1787
Michael Chan60189dd2006-12-17 17:08:07 -08001788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1789 u32 val;
1790
1791 val = tr32(GRC_MISC_CFG);
1792 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1793 udelay(40);
1794 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1796 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1797 if (err != 0)
1798 return -EBUSY;
1799
Michael Chanc8e1e822006-04-29 18:55:17 -07001800 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1801 netif_carrier_off(tp->dev);
1802 tg3_link_report(tp);
1803 }
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1806 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1807 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1808 err = tg3_phy_reset_5703_4_5(tp);
1809 if (err)
1810 return err;
1811 goto out;
1812 }
1813
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001814 cpmuctrl = 0;
1815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1816 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1817 cpmuctrl = tr32(TG3_CPMU_CTRL);
1818 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1819 tw32(TG3_CPMU_CTRL,
1820 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1821 }
1822
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 err = tg3_bmcr_reset(tp);
1824 if (err)
1825 return err;
1826
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001827 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1828 u32 phy;
1829
1830 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1831 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1832
1833 tw32(TG3_CPMU_CTRL, cpmuctrl);
1834 }
1835
Matt Carlsonbcb37f62008-11-03 16:52:09 -08001836 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1837 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001838 u32 val;
1839
1840 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1841 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1842 CPMU_LSPD_1000MB_MACCLK_12_5) {
1843 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1844 udelay(40);
1845 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1846 }
1847 }
1848
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001849 tg3_phy_apply_otp(tp);
1850
Matt Carlson6833c042008-11-21 17:18:59 -08001851 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1852 tg3_phy_toggle_apd(tp, true);
1853 else
1854 tg3_phy_toggle_apd(tp, false);
1855
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856out:
1857 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1858 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1859 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1860 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1861 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1862 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1863 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1864 }
1865 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1866 tg3_writephy(tp, 0x1c, 0x8d68);
1867 tg3_writephy(tp, 0x1c, 0x8d68);
1868 }
1869 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1870 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1871 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1872 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1873 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1874 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1875 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1876 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1877 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1878 }
Michael Chanc424cb22006-04-29 18:56:34 -07001879 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1880 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1881 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001882 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1883 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1884 tg3_writephy(tp, MII_TG3_TEST1,
1885 MII_TG3_TEST1_TRIM_EN | 0x4);
1886 } else
1887 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001888 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1889 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 /* Set Extended packet length bit (bit 14) on all chips that */
1891 /* support jumbo frames */
1892 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1893 /* Cannot do read-modify-write on 5401 */
1894 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001895 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 u32 phy_reg;
1897
1898 /* Set bit 14 with read-modify-write to preserve other bits */
1899 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1900 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1901 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1902 }
1903
1904 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1905 * jumbo frames transmission.
1906 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001907 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 u32 phy_reg;
1909
1910 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1911 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1912 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1913 }
1914
Michael Chan715116a2006-09-27 16:09:25 -07001915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001916 /* adjust output voltage */
1917 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001918 }
1919
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001920 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 tg3_phy_set_wirespeed(tp);
1922 return 0;
1923}
1924
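/* Switch the device (and, on dual-port 5704/5714 boards, its peer)
 * between main and auxiliary power by driving the GRC local-control
 * GPIOs, depending on the WOL and ASF configuration.
 */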
1925static void tg3_frob_aux_power(struct tg3 *tp)
1926{
1927 struct tg3 *tp_peer = tp;
1928
Michael Chan9d26e212006-12-07 00:21:14 -08001929 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 return;
1931
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001932 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1933 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1934 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001936 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001937 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001938 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001939 tp_peer = tp;
1940 else
1941 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001942 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
1944 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001945 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1946 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1947 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001950 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1951 (GRC_LCLCTRL_GPIO_OE0 |
1952 GRC_LCLCTRL_GPIO_OE1 |
1953 GRC_LCLCTRL_GPIO_OE2 |
1954 GRC_LCLCTRL_GPIO_OUTPUT0 |
1955 GRC_LCLCTRL_GPIO_OUTPUT1),
1956 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001957 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1958 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1959 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1960 GRC_LCLCTRL_GPIO_OE1 |
1961 GRC_LCLCTRL_GPIO_OE2 |
1962 GRC_LCLCTRL_GPIO_OUTPUT0 |
1963 GRC_LCLCTRL_GPIO_OUTPUT1 |
1964 tp->grc_local_ctrl;
1965 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1966
1967 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1968 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1969
1970 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1971 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 } else {
1973 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001974 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
1976 if (tp_peer != tp &&
1977 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1978 return;
1979
Michael Chandc56b7d2005-12-19 16:26:28 -08001980 /* Workaround to prevent overdrawing Amps. */
1981 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1982 ASIC_REV_5714) {
1983 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001984 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1985 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001986 }
1987
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 /* On 5753 and variants, GPIO2 cannot be used. */
1989 no_gpio2 = tp->nic_sram_data_cfg &
1990 NIC_SRAM_DATA_CFG_NO_GPIO2;
1991
Michael Chandc56b7d2005-12-19 16:26:28 -08001992 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 GRC_LCLCTRL_GPIO_OE1 |
1994 GRC_LCLCTRL_GPIO_OE2 |
1995 GRC_LCLCTRL_GPIO_OUTPUT1 |
1996 GRC_LCLCTRL_GPIO_OUTPUT2;
1997 if (no_gpio2) {
1998 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1999 GRC_LCLCTRL_GPIO_OUTPUT2);
2000 }
Michael Chanb401e9e2005-12-19 16:27:04 -08002001 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2002 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
2004 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2005
Michael Chanb401e9e2005-12-19 16:27:04 -08002006 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2007 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
2009 if (!no_gpio2) {
2010 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08002011 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2012 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 }
2014 }
2015 } else {
2016 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2017 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2018 if (tp_peer != tp &&
2019 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2020 return;
2021
Michael Chanb401e9e2005-12-19 16:27:04 -08002022 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2023 (GRC_LCLCTRL_GPIO_OE1 |
2024 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025
Michael Chanb401e9e2005-12-19 16:27:04 -08002026 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2027 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
Michael Chanb401e9e2005-12-19 16:27:04 -08002029 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2030 (GRC_LCLCTRL_GPIO_OE1 |
2031 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 }
2033 }
2034}
2035
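/* Return 1 if the MAC_MODE link polarity bit must be set for this
 * link speed on 5700-class boards, based on the LED mode and PHY type.
 */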
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002036static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2037{
2038 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2039 return 1;
2040 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2041 if (speed != SPEED_10)
2042 return 1;
2043 } else if (speed == SPEED_10)
2044 return 1;
2045
2046 return 0;
2047}
2048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049static int tg3_setup_phy(struct tg3 *, int);
2050
2051#define RESET_KIND_SHUTDOWN 0
2052#define RESET_KIND_INIT 1
2053#define RESET_KIND_SUSPEND 2
2054
2055static void tg3_write_sig_post_reset(struct tg3 *, int);
2056static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08002057static int tg3_nvram_lock(struct tg3 *);
2058static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
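/* Power down the PHY (or isolate the serdes interface) ahead of a
 * low-power transition, skipping chips where powering the PHY down is
 * known to cause problems.
 */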
Matt Carlson0a459aa2008-11-03 16:54:15 -08002060static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
Michael Chan15c3b692006-03-22 01:06:52 -08002061{
Matt Carlsonce057f02007-11-12 21:08:03 -08002062 u32 val;
2063
Michael Chan51297242007-02-13 12:17:57 -08002064 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2066 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2067 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2068
2069 sg_dig_ctrl |=
2070 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2071 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2072 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2073 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002074 return;
Michael Chan51297242007-02-13 12:17:57 -08002075 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002076
Michael Chan60189dd2006-12-17 17:08:07 -08002077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08002078 tg3_bmcr_reset(tp);
2079 val = tr32(GRC_MISC_CFG);
2080 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2081 udelay(40);
2082 return;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002083 } else if (do_low_power) {
Michael Chan715116a2006-09-27 16:09:25 -07002084 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2085 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002086
2087 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2088 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2089 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2090 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2091 MII_TG3_AUXCTL_PCTL_VREG_11V);
Michael Chan715116a2006-09-27 16:09:25 -07002092 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002093
Michael Chan15c3b692006-03-22 01:06:52 -08002094 /* The PHY should not be powered down on some chips because
2095 * of bugs.
2096 */
2097 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2099 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2100 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2101 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002102
Matt Carlsonbcb37f62008-11-03 16:52:09 -08002103 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2104 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002105 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2106 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2107 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2108 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2109 }
2110
Michael Chan15c3b692006-03-22 01:06:52 -08002111 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2112}
2113
Matt Carlson3f007892008-11-03 16:51:36 -08002114/* tp->lock is held. */
2115static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2116{
2117 u32 addr_high, addr_low;
2118 int i;
2119
2120 addr_high = ((tp->dev->dev_addr[0] << 8) |
2121 tp->dev->dev_addr[1]);
2122 addr_low = ((tp->dev->dev_addr[2] << 24) |
2123 (tp->dev->dev_addr[3] << 16) |
2124 (tp->dev->dev_addr[4] << 8) |
2125 (tp->dev->dev_addr[5] << 0));
2126 for (i = 0; i < 4; i++) {
2127 if (i == 1 && skip_mac_1)
2128 continue;
2129 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2130 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2131 }
2132
2133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2135 for (i = 0; i < 12; i++) {
2136 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2137 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2138 }
2139 }
2140
2141 addr_high = (tp->dev->dev_addr[0] +
2142 tp->dev->dev_addr[1] +
2143 tp->dev->dev_addr[2] +
2144 tp->dev->dev_addr[3] +
2145 tp->dev->dev_addr[4] +
2146 tp->dev->dev_addr[5]) &
2147 TX_BACKOFF_SEED_MASK;
2148 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2149}
2150
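/* Move the device into the requested PCI power state: save the link
 * configuration, set up Wake-on-LAN advertising and MAC/clock modes,
 * power down the PHY when allowed, switch auxiliary power, and finally
 * program the new state through the PCI core.
 */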
Michael Chanbc1c7562006-03-20 17:48:03 -08002151static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152{
2153 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002154 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156 /* Make sure register accesses (indirect or otherwise)
2157 * will function correctly.
2158 */
2159 pci_write_config_dword(tp->pdev,
2160 TG3PCI_MISC_HOST_CTRL,
2161 tp->misc_host_ctrl);
2162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002164 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002165 pci_enable_wake(tp->pdev, state, false);
2166 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002167
Michael Chan9d26e212006-12-07 00:21:14 -08002168 /* Switch out of Vaux if it is a NIC */
2169 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002170 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
2172 return 0;
2173
Michael Chanbc1c7562006-03-20 17:48:03 -08002174 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002175 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002176 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 break;
2178
2179 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002180 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2181 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002183 }
Matt Carlson5e7dfd02008-11-21 17:18:16 -08002184
2185 /* Restore the CLKREQ setting. */
2186 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2187 u16 lnkctl;
2188
2189 pci_read_config_word(tp->pdev,
2190 tp->pcie_cap + PCI_EXP_LNKCTL,
2191 &lnkctl);
2192 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2193 pci_write_config_word(tp->pdev,
2194 tp->pcie_cap + PCI_EXP_LNKCTL,
2195 lnkctl);
2196 }
2197
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2199 tw32(TG3PCI_MISC_HOST_CTRL,
2200 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2201
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002202 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2203 device_may_wakeup(&tp->pdev->dev) &&
2204 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2205
Matt Carlsondd477002008-05-25 23:45:58 -07002206 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002207 do_low_power = false;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002208 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2209 !tp->link_config.phy_is_low_power) {
2210 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002211 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002212
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002213 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002214
2215 tp->link_config.phy_is_low_power = 1;
2216
2217 tp->link_config.orig_speed = phydev->speed;
2218 tp->link_config.orig_duplex = phydev->duplex;
2219 tp->link_config.orig_autoneg = phydev->autoneg;
2220 tp->link_config.orig_advertising = phydev->advertising;
2221
2222 advertising = ADVERTISED_TP |
2223 ADVERTISED_Pause |
2224 ADVERTISED_Autoneg |
2225 ADVERTISED_10baseT_Half;
2226
2227 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002228 device_should_wake) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002229 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2230 advertising |=
2231 ADVERTISED_100baseT_Half |
2232 ADVERTISED_100baseT_Full |
2233 ADVERTISED_10baseT_Full;
2234 else
2235 advertising |= ADVERTISED_10baseT_Full;
2236 }
2237
2238 phydev->advertising = advertising;
2239
2240 phy_start_aneg(phydev);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002241
2242 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2243 if (phyid != TG3_PHY_ID_BCMAC131) {
2244 phyid &= TG3_PHY_OUI_MASK;
 2245			if (phyid == TG3_PHY_OUI_1 ||
 2246			    phyid == TG3_PHY_OUI_2 ||
 2247			    phyid == TG3_PHY_OUI_3)
2248 do_low_power = true;
2249 }
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002250 }
Matt Carlsondd477002008-05-25 23:45:58 -07002251 } else {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002252 do_low_power = false;
2253
Matt Carlsondd477002008-05-25 23:45:58 -07002254 if (tp->link_config.phy_is_low_power == 0) {
2255 tp->link_config.phy_is_low_power = 1;
2256 tp->link_config.orig_speed = tp->link_config.speed;
2257 tp->link_config.orig_duplex = tp->link_config.duplex;
2258 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Matt Carlsondd477002008-05-25 23:45:58 -07002261 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2262 tp->link_config.speed = SPEED_10;
2263 tp->link_config.duplex = DUPLEX_HALF;
2264 tp->link_config.autoneg = AUTONEG_ENABLE;
2265 tg3_setup_phy(tp, 0);
2266 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 }
2268
Matt Carlson3f007892008-11-03 16:51:36 -08002269 __tg3_set_mac_addr(tp, 0);
2270
Michael Chanb5d37722006-09-27 16:06:21 -07002271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2272 u32 val;
2273
2274 val = tr32(GRC_VCPU_EXT_CTRL);
2275 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2276 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002277 int i;
2278 u32 val;
2279
2280 for (i = 0; i < 200; i++) {
2281 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2282 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2283 break;
2284 msleep(1);
2285 }
2286 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002287 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2288 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2289 WOL_DRV_STATE_SHUTDOWN |
2290 WOL_DRV_WOL |
2291 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002292
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002293 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 u32 mac_mode;
2295
2296 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002297 if (do_low_power) {
Matt Carlsondd477002008-05-25 23:45:58 -07002298 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2299 udelay(40);
2300 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
Michael Chan3f7045c2006-09-27 16:02:29 -07002302 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2303 mac_mode = MAC_MODE_PORT_MODE_GMII;
2304 else
2305 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002307 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2308 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2309 ASIC_REV_5700) {
2310 u32 speed = (tp->tg3_flags &
2311 TG3_FLAG_WOL_SPEED_100MB) ?
2312 SPEED_100 : SPEED_10;
2313 if (tg3_5700_link_polarity(tp, speed))
2314 mac_mode |= MAC_MODE_LINK_POLARITY;
2315 else
2316 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2317 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 } else {
2319 mac_mode = MAC_MODE_PORT_MODE_TBI;
2320 }
2321
John W. Linvillecbf46852005-04-21 17:01:29 -07002322 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 tw32(MAC_LED_CTRL, tp->led_ctrl);
2324
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002325 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2326 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2327 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2328 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2329 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2330 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
Matt Carlson3bda1252008-08-15 14:08:22 -07002332 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2333 mac_mode |= tp->mac_mode &
2334 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2335 if (mac_mode & MAC_MODE_APE_TX_EN)
2336 mac_mode |= MAC_MODE_TDE_ENABLE;
2337 }
2338
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 tw32_f(MAC_MODE, mac_mode);
2340 udelay(100);
2341
2342 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2343 udelay(10);
2344 }
2345
2346 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2347 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2349 u32 base_val;
2350
2351 base_val = tp->pci_clock_ctrl;
2352 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2353 CLOCK_CTRL_TXCLK_DISABLE);
2354
Michael Chanb401e9e2005-12-19 16:27:04 -08002355 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2356 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002357 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002358 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002359 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002360 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002361 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2363 u32 newbits1, newbits2;
2364
2365 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2366 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2367 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2368 CLOCK_CTRL_TXCLK_DISABLE |
2369 CLOCK_CTRL_ALTCLK);
2370 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2371 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2372 newbits1 = CLOCK_CTRL_625_CORE;
2373 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2374 } else {
2375 newbits1 = CLOCK_CTRL_ALTCLK;
2376 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2377 }
2378
Michael Chanb401e9e2005-12-19 16:27:04 -08002379 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2380 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Michael Chanb401e9e2005-12-19 16:27:04 -08002382 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2383 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384
2385 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2386 u32 newbits3;
2387
2388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2390 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2391 CLOCK_CTRL_TXCLK_DISABLE |
2392 CLOCK_CTRL_44MHZ_CORE);
2393 } else {
2394 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2395 }
2396
Michael Chanb401e9e2005-12-19 16:27:04 -08002397 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2398 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 }
2400 }
2401
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002402 if (!(device_should_wake) &&
Matt Carlson22435842008-11-21 17:21:13 -08002403 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
Matt Carlson0a459aa2008-11-03 16:54:15 -08002404 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08002405
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 tg3_frob_aux_power(tp);
2407
2408 /* Workaround for unstable PLL clock */
2409 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2410 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2411 u32 val = tr32(0x7d00);
2412
2413 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2414 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002415 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002416 int err;
2417
2418 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002420 if (!err)
2421 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002422 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 }
2424
Michael Chanbbadf502006-04-06 21:46:34 -07002425 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2426
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002427 if (device_should_wake)
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002428 pci_enable_wake(tp->pdev, state, true);
2429
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002431 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 return 0;
2434}
2435
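/* Decode the speed/duplex field of the MII_TG3_AUX_STAT register into
 * SPEED_* and DUPLEX_* values.
 */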
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2437{
2438 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2439 case MII_TG3_AUX_STAT_10HALF:
2440 *speed = SPEED_10;
2441 *duplex = DUPLEX_HALF;
2442 break;
2443
2444 case MII_TG3_AUX_STAT_10FULL:
2445 *speed = SPEED_10;
2446 *duplex = DUPLEX_FULL;
2447 break;
2448
2449 case MII_TG3_AUX_STAT_100HALF:
2450 *speed = SPEED_100;
2451 *duplex = DUPLEX_HALF;
2452 break;
2453
2454 case MII_TG3_AUX_STAT_100FULL:
2455 *speed = SPEED_100;
2456 *duplex = DUPLEX_FULL;
2457 break;
2458
2459 case MII_TG3_AUX_STAT_1000HALF:
2460 *speed = SPEED_1000;
2461 *duplex = DUPLEX_HALF;
2462 break;
2463
2464 case MII_TG3_AUX_STAT_1000FULL:
2465 *speed = SPEED_1000;
2466 *duplex = DUPLEX_FULL;
2467 break;
2468
2469 default:
Michael Chan715116a2006-09-27 16:09:25 -07002470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2471 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2472 SPEED_10;
2473 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2474 DUPLEX_HALF;
2475 break;
2476 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 *speed = SPEED_INVALID;
2478 *duplex = DUPLEX_INVALID;
2479 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002480 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481}
2482
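/* Program the copper PHY advertisement (and BMCR for forced modes)
 * from tp->link_config, then restart autonegotiation or force the
 * requested speed and duplex.
 */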
2483static void tg3_phy_copper_begin(struct tg3 *tp)
2484{
2485 u32 new_adv;
2486 int i;
2487
2488 if (tp->link_config.phy_is_low_power) {
2489 /* Entering low power mode. Disable gigabit and
2490 * 100baseT advertisements.
2491 */
2492 tg3_writephy(tp, MII_TG3_CTRL, 0);
2493
2494 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2495 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2496 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2497 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2498
2499 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2500 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2502 tp->link_config.advertising &=
2503 ~(ADVERTISED_1000baseT_Half |
2504 ADVERTISED_1000baseT_Full);
2505
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002506 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2508 new_adv |= ADVERTISE_10HALF;
2509 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2510 new_adv |= ADVERTISE_10FULL;
2511 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2512 new_adv |= ADVERTISE_100HALF;
2513 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2514 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002515
2516 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2517
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2519
2520 if (tp->link_config.advertising &
2521 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2522 new_adv = 0;
2523 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2524 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2525 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2526 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2527 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2528 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2529 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2530 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2531 MII_TG3_CTRL_ENABLE_AS_MASTER);
2532 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2533 } else {
2534 tg3_writephy(tp, MII_TG3_CTRL, 0);
2535 }
2536 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002537 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2538 new_adv |= ADVERTISE_CSMA;
2539
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 /* Asking for a specific link mode. */
2541 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2543
2544 if (tp->link_config.duplex == DUPLEX_FULL)
2545 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2546 else
2547 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2548 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2549 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2550 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2551 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 if (tp->link_config.speed == SPEED_100) {
2554 if (tp->link_config.duplex == DUPLEX_FULL)
2555 new_adv |= ADVERTISE_100FULL;
2556 else
2557 new_adv |= ADVERTISE_100HALF;
2558 } else {
2559 if (tp->link_config.duplex == DUPLEX_FULL)
2560 new_adv |= ADVERTISE_10FULL;
2561 else
2562 new_adv |= ADVERTISE_10HALF;
2563 }
2564 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002565
2566 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002568
2569 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 }
2571
2572 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2573 tp->link_config.speed != SPEED_INVALID) {
2574 u32 bmcr, orig_bmcr;
2575
2576 tp->link_config.active_speed = tp->link_config.speed;
2577 tp->link_config.active_duplex = tp->link_config.duplex;
2578
2579 bmcr = 0;
2580 switch (tp->link_config.speed) {
2581 default:
2582 case SPEED_10:
2583 break;
2584
2585 case SPEED_100:
2586 bmcr |= BMCR_SPEED100;
2587 break;
2588
2589 case SPEED_1000:
2590 bmcr |= TG3_BMCR_SPEED1000;
2591 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593
2594 if (tp->link_config.duplex == DUPLEX_FULL)
2595 bmcr |= BMCR_FULLDPLX;
2596
2597 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2598 (bmcr != orig_bmcr)) {
2599 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2600 for (i = 0; i < 1500; i++) {
2601 u32 tmp;
2602
2603 udelay(10);
2604 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2605 tg3_readphy(tp, MII_BMSR, &tmp))
2606 continue;
2607 if (!(tmp & BMSR_LSTATUS)) {
2608 udelay(40);
2609 break;
2610 }
2611 }
2612 tg3_writephy(tp, MII_BMCR, bmcr);
2613 udelay(40);
2614 }
2615 } else {
2616 tg3_writephy(tp, MII_BMCR,
2617 BMCR_ANENABLE | BMCR_ANRESTART);
2618 }
2619}
2620
2621static int tg3_init_5401phy_dsp(struct tg3 *tp)
2622{
2623 int err;
2624
2625 /* Turn off tap power management. */
2626 /* Set Extended packet length bit */
2627 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2628
2629 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2630 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2631
2632 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2633 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2634
2635 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2636 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2637
2638 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2639 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2640
2641 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2642 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2643
2644 udelay(40);
2645
2646 return err;
2647}
2648
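/* Return 1 if the PHY advertisement registers already contain every
 * mode requested in "mask", 0 otherwise.
 */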
Michael Chan3600d912006-12-07 00:21:48 -08002649static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650{
Michael Chan3600d912006-12-07 00:21:48 -08002651 u32 adv_reg, all_mask = 0;
2652
2653 if (mask & ADVERTISED_10baseT_Half)
2654 all_mask |= ADVERTISE_10HALF;
2655 if (mask & ADVERTISED_10baseT_Full)
2656 all_mask |= ADVERTISE_10FULL;
2657 if (mask & ADVERTISED_100baseT_Half)
2658 all_mask |= ADVERTISE_100HALF;
2659 if (mask & ADVERTISED_100baseT_Full)
2660 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661
2662 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2663 return 0;
2664
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 if ((adv_reg & all_mask) != all_mask)
2666 return 0;
2667 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2668 u32 tg3_ctrl;
2669
Michael Chan3600d912006-12-07 00:21:48 -08002670 all_mask = 0;
2671 if (mask & ADVERTISED_1000baseT_Half)
2672 all_mask |= ADVERTISE_1000HALF;
2673 if (mask & ADVERTISED_1000baseT_Full)
2674 all_mask |= ADVERTISE_1000FULL;
2675
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2677 return 0;
2678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 if ((tg3_ctrl & all_mask) != all_mask)
2680 return 0;
2681 }
2682 return 1;
2683}
2684
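/* Verify that the advertised PAUSE bits match the requested flow
 * control.  On a full-duplex link a mismatch fails the check; otherwise
 * the advertisement register is reprogrammed so a later renegotiation
 * picks up the correct setting.
 */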
Matt Carlsonef167e22007-12-20 20:10:01 -08002685static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2686{
2687 u32 curadv, reqadv;
2688
2689 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2690 return 1;
2691
2692 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2693 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2694
2695 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2696 if (curadv != reqadv)
2697 return 0;
2698
2699 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2700 tg3_readphy(tp, MII_LPA, rmtadv);
2701 } else {
2702 /* Reprogram the advertisement register, even if it
2703 * does not affect the current link. If the link
2704 * gets renegotiated in the future, we can save an
2705 * additional renegotiation cycle by advertising
2706 * it correctly in the first place.
2707 */
2708 if (curadv != reqadv) {
2709 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2710 ADVERTISE_PAUSE_ASYM);
2711 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2712 }
2713 }
2714
2715 return 1;
2716}
2717
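/* Bring up the link on a copper PHY: apply chip-specific PHY errata,
 * poll for link, work out the negotiated speed, duplex and flow
 * control, and program the MAC mode to match.
 */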
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2719{
2720 int current_link_up;
2721 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002722 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 u16 current_speed;
2724 u8 current_duplex;
2725 int i, err;
2726
2727 tw32(MAC_EVENT, 0);
2728
2729 tw32_f(MAC_STATUS,
2730 (MAC_STATUS_SYNC_CHANGED |
2731 MAC_STATUS_CFG_CHANGED |
2732 MAC_STATUS_MI_COMPLETION |
2733 MAC_STATUS_LNKSTATE_CHANGED));
2734 udelay(40);
2735
Matt Carlson8ef21422008-05-02 16:47:53 -07002736 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2737 tw32_f(MAC_MI_MODE,
2738 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2739 udelay(80);
2740 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741
2742 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2743
2744 /* Some third-party PHYs need to be reset on link going
2745 * down.
2746 */
2747 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2749 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2750 netif_carrier_ok(tp->dev)) {
2751 tg3_readphy(tp, MII_BMSR, &bmsr);
2752 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2753 !(bmsr & BMSR_LSTATUS))
2754 force_reset = 1;
2755 }
2756 if (force_reset)
2757 tg3_phy_reset(tp);
2758
2759 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2760 tg3_readphy(tp, MII_BMSR, &bmsr);
2761 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2762 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2763 bmsr = 0;
2764
2765 if (!(bmsr & BMSR_LSTATUS)) {
2766 err = tg3_init_5401phy_dsp(tp);
2767 if (err)
2768 return err;
2769
2770 tg3_readphy(tp, MII_BMSR, &bmsr);
2771 for (i = 0; i < 1000; i++) {
2772 udelay(10);
2773 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2774 (bmsr & BMSR_LSTATUS)) {
2775 udelay(40);
2776 break;
2777 }
2778 }
2779
2780 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2781 !(bmsr & BMSR_LSTATUS) &&
2782 tp->link_config.active_speed == SPEED_1000) {
2783 err = tg3_phy_reset(tp);
2784 if (!err)
2785 err = tg3_init_5401phy_dsp(tp);
2786 if (err)
2787 return err;
2788 }
2789 }
2790 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2791 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2792 /* 5701 {A0,B0} CRC bug workaround */
2793 tg3_writephy(tp, 0x15, 0x0a75);
2794 tg3_writephy(tp, 0x1c, 0x8c68);
2795 tg3_writephy(tp, 0x1c, 0x8d68);
2796 tg3_writephy(tp, 0x1c, 0x8c68);
2797 }
2798
2799 /* Clear pending interrupts... */
2800 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2801 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2802
2803 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2804 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002805 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2807
2808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2810 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2811 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2812 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2813 else
2814 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2815 }
2816
2817 current_link_up = 0;
2818 current_speed = SPEED_INVALID;
2819 current_duplex = DUPLEX_INVALID;
2820
2821 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2822 u32 val;
2823
2824 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2825 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2826 if (!(val & (1 << 10))) {
2827 val |= (1 << 10);
2828 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2829 goto relink;
2830 }
2831 }
2832
2833 bmsr = 0;
2834 for (i = 0; i < 100; i++) {
2835 tg3_readphy(tp, MII_BMSR, &bmsr);
2836 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2837 (bmsr & BMSR_LSTATUS))
2838 break;
2839 udelay(40);
2840 }
2841
2842 if (bmsr & BMSR_LSTATUS) {
2843 u32 aux_stat, bmcr;
2844
2845 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2846 for (i = 0; i < 2000; i++) {
2847 udelay(10);
2848 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2849 aux_stat)
2850 break;
2851 }
2852
2853 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2854 &current_speed,
2855 &current_duplex);
2856
2857 bmcr = 0;
2858 for (i = 0; i < 200; i++) {
2859 tg3_readphy(tp, MII_BMCR, &bmcr);
2860 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2861 continue;
2862 if (bmcr && bmcr != 0x7fff)
2863 break;
2864 udelay(10);
2865 }
2866
Matt Carlsonef167e22007-12-20 20:10:01 -08002867 lcl_adv = 0;
2868 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869
Matt Carlsonef167e22007-12-20 20:10:01 -08002870 tp->link_config.active_speed = current_speed;
2871 tp->link_config.active_duplex = current_duplex;
2872
2873 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2874 if ((bmcr & BMCR_ANENABLE) &&
2875 tg3_copper_is_advertising_all(tp,
2876 tp->link_config.advertising)) {
2877 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2878 &rmt_adv))
2879 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 }
2881 } else {
2882 if (!(bmcr & BMCR_ANENABLE) &&
2883 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002884 tp->link_config.duplex == current_duplex &&
2885 tp->link_config.flowctrl ==
2886 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 }
2889 }
2890
Matt Carlsonef167e22007-12-20 20:10:01 -08002891 if (current_link_up == 1 &&
2892 tp->link_config.active_duplex == DUPLEX_FULL)
2893 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 }
2895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896relink:
Michael Chan6921d202005-12-13 21:15:53 -08002897 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 u32 tmp;
2899
2900 tg3_phy_copper_begin(tp);
2901
2902 tg3_readphy(tp, MII_BMSR, &tmp);
2903 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2904 (tmp & BMSR_LSTATUS))
2905 current_link_up = 1;
2906 }
2907
2908 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2909 if (current_link_up == 1) {
2910 if (tp->link_config.active_speed == SPEED_100 ||
2911 tp->link_config.active_speed == SPEED_10)
2912 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2913 else
2914 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2915 } else
2916 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2917
2918 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2919 if (tp->link_config.active_duplex == DUPLEX_HALF)
2920 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2921
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002923 if (current_link_up == 1 &&
2924 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002926 else
2927 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 }
2929
2930 /* ??? Without this setting Netgear GA302T PHY does not
2931 * ??? send/receive packets...
2932 */
2933 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2934 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2935 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2936 tw32_f(MAC_MI_MODE, tp->mi_mode);
2937 udelay(80);
2938 }
2939
2940 tw32_f(MAC_MODE, tp->mac_mode);
2941 udelay(40);
2942
2943 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2944 /* Polled via timer. */
2945 tw32_f(MAC_EVENT, 0);
2946 } else {
2947 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2948 }
2949 udelay(40);
2950
2951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2952 current_link_up == 1 &&
2953 tp->link_config.active_speed == SPEED_1000 &&
2954 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2955 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2956 udelay(120);
2957 tw32_f(MAC_STATUS,
2958 (MAC_STATUS_SYNC_CHANGED |
2959 MAC_STATUS_CFG_CHANGED));
2960 udelay(40);
2961 tg3_write_mem(tp,
2962 NIC_SRAM_FIRMWARE_MBOX,
2963 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2964 }
2965
Matt Carlson5e7dfd02008-11-21 17:18:16 -08002966 /* Prevent send BD corruption. */
2967 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2968 u16 oldlnkctl, newlnkctl;
2969
2970 pci_read_config_word(tp->pdev,
2971 tp->pcie_cap + PCI_EXP_LNKCTL,
2972 &oldlnkctl);
2973 if (tp->link_config.active_speed == SPEED_100 ||
2974 tp->link_config.active_speed == SPEED_10)
2975 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
2976 else
2977 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
2978 if (newlnkctl != oldlnkctl)
2979 pci_write_config_word(tp->pdev,
2980 tp->pcie_cap + PCI_EXP_LNKCTL,
2981 newlnkctl);
2982 }
2983
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 if (current_link_up != netif_carrier_ok(tp->dev)) {
2985 if (current_link_up)
2986 netif_carrier_on(tp->dev);
2987 else
2988 netif_carrier_off(tp->dev);
2989 tg3_link_report(tp);
2990 }
2991
2992 return 0;
2993}
2994
2995struct tg3_fiber_aneginfo {
2996 int state;
2997#define ANEG_STATE_UNKNOWN 0
2998#define ANEG_STATE_AN_ENABLE 1
2999#define ANEG_STATE_RESTART_INIT 2
3000#define ANEG_STATE_RESTART 3
3001#define ANEG_STATE_DISABLE_LINK_OK 4
3002#define ANEG_STATE_ABILITY_DETECT_INIT 5
3003#define ANEG_STATE_ABILITY_DETECT 6
3004#define ANEG_STATE_ACK_DETECT_INIT 7
3005#define ANEG_STATE_ACK_DETECT 8
3006#define ANEG_STATE_COMPLETE_ACK_INIT 9
3007#define ANEG_STATE_COMPLETE_ACK 10
3008#define ANEG_STATE_IDLE_DETECT_INIT 11
3009#define ANEG_STATE_IDLE_DETECT 12
3010#define ANEG_STATE_LINK_OK 13
3011#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3012#define ANEG_STATE_NEXT_PAGE_WAIT 15
3013
3014 u32 flags;
3015#define MR_AN_ENABLE 0x00000001
3016#define MR_RESTART_AN 0x00000002
3017#define MR_AN_COMPLETE 0x00000004
3018#define MR_PAGE_RX 0x00000008
3019#define MR_NP_LOADED 0x00000010
3020#define MR_TOGGLE_TX 0x00000020
3021#define MR_LP_ADV_FULL_DUPLEX 0x00000040
3022#define MR_LP_ADV_HALF_DUPLEX 0x00000080
3023#define MR_LP_ADV_SYM_PAUSE 0x00000100
3024#define MR_LP_ADV_ASYM_PAUSE 0x00000200
3025#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3026#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3027#define MR_LP_ADV_NEXT_PAGE 0x00001000
3028#define MR_TOGGLE_RX 0x00002000
3029#define MR_NP_RX 0x00004000
3030
3031#define MR_LINK_OK 0x80000000
3032
3033 unsigned long link_time, cur_time;
3034
3035 u32 ability_match_cfg;
3036 int ability_match_count;
3037
3038 char ability_match, idle_match, ack_match;
3039
3040 u32 txconfig, rxconfig;
3041#define ANEG_CFG_NP 0x00000080
3042#define ANEG_CFG_ACK 0x00000040
3043#define ANEG_CFG_RF2 0x00000020
3044#define ANEG_CFG_RF1 0x00000010
3045#define ANEG_CFG_PS2 0x00000001
3046#define ANEG_CFG_PS1 0x00008000
3047#define ANEG_CFG_HD 0x00004000
3048#define ANEG_CFG_FD 0x00002000
3049#define ANEG_CFG_INVAL 0x00001f06
3050
3051};
3052#define ANEG_OK 0
3053#define ANEG_DONE 1
3054#define ANEG_TIMER_ENAB 2
3055#define ANEG_FAILED -1
3056
3057#define ANEG_STATE_SETTLE_TIME 10000
3058
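/* Software autonegotiation state machine for fiber (TBI) links.  It
 * exchanges configuration words through the MAC_TX_AUTO_NEG and
 * MAC_RX_AUTO_NEG registers and reports progress through ap->flags.
 */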
3059static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3060 struct tg3_fiber_aneginfo *ap)
3061{
Matt Carlson5be73b42007-12-20 20:09:29 -08003062 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 unsigned long delta;
3064 u32 rx_cfg_reg;
3065 int ret;
3066
3067 if (ap->state == ANEG_STATE_UNKNOWN) {
3068 ap->rxconfig = 0;
3069 ap->link_time = 0;
3070 ap->cur_time = 0;
3071 ap->ability_match_cfg = 0;
3072 ap->ability_match_count = 0;
3073 ap->ability_match = 0;
3074 ap->idle_match = 0;
3075 ap->ack_match = 0;
3076 }
3077 ap->cur_time++;
3078
3079 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3080 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3081
3082 if (rx_cfg_reg != ap->ability_match_cfg) {
3083 ap->ability_match_cfg = rx_cfg_reg;
3084 ap->ability_match = 0;
3085 ap->ability_match_count = 0;
3086 } else {
3087 if (++ap->ability_match_count > 1) {
3088 ap->ability_match = 1;
3089 ap->ability_match_cfg = rx_cfg_reg;
3090 }
3091 }
3092 if (rx_cfg_reg & ANEG_CFG_ACK)
3093 ap->ack_match = 1;
3094 else
3095 ap->ack_match = 0;
3096
3097 ap->idle_match = 0;
3098 } else {
3099 ap->idle_match = 1;
3100 ap->ability_match_cfg = 0;
3101 ap->ability_match_count = 0;
3102 ap->ability_match = 0;
3103 ap->ack_match = 0;
3104
3105 rx_cfg_reg = 0;
3106 }
3107
3108 ap->rxconfig = rx_cfg_reg;
3109 ret = ANEG_OK;
3110
3111 switch(ap->state) {
3112 case ANEG_STATE_UNKNOWN:
3113 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3114 ap->state = ANEG_STATE_AN_ENABLE;
3115
3116 /* fallthru */
3117 case ANEG_STATE_AN_ENABLE:
3118 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3119 if (ap->flags & MR_AN_ENABLE) {
3120 ap->link_time = 0;
3121 ap->cur_time = 0;
3122 ap->ability_match_cfg = 0;
3123 ap->ability_match_count = 0;
3124 ap->ability_match = 0;
3125 ap->idle_match = 0;
3126 ap->ack_match = 0;
3127
3128 ap->state = ANEG_STATE_RESTART_INIT;
3129 } else {
3130 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3131 }
3132 break;
3133
3134 case ANEG_STATE_RESTART_INIT:
3135 ap->link_time = ap->cur_time;
3136 ap->flags &= ~(MR_NP_LOADED);
3137 ap->txconfig = 0;
3138 tw32(MAC_TX_AUTO_NEG, 0);
3139 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3140 tw32_f(MAC_MODE, tp->mac_mode);
3141 udelay(40);
3142
3143 ret = ANEG_TIMER_ENAB;
3144 ap->state = ANEG_STATE_RESTART;
3145
3146 /* fallthru */
3147 case ANEG_STATE_RESTART:
3148 delta = ap->cur_time - ap->link_time;
3149 if (delta > ANEG_STATE_SETTLE_TIME) {
3150 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3151 } else {
3152 ret = ANEG_TIMER_ENAB;
3153 }
3154 break;
3155
3156 case ANEG_STATE_DISABLE_LINK_OK:
3157 ret = ANEG_DONE;
3158 break;
3159
3160 case ANEG_STATE_ABILITY_DETECT_INIT:
3161 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003162 ap->txconfig = ANEG_CFG_FD;
3163 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3164 if (flowctrl & ADVERTISE_1000XPAUSE)
3165 ap->txconfig |= ANEG_CFG_PS1;
3166 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3167 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3169 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3170 tw32_f(MAC_MODE, tp->mac_mode);
3171 udelay(40);
3172
3173 ap->state = ANEG_STATE_ABILITY_DETECT;
3174 break;
3175
3176 case ANEG_STATE_ABILITY_DETECT:
3177 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3178 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3179 }
3180 break;
3181
3182 case ANEG_STATE_ACK_DETECT_INIT:
3183 ap->txconfig |= ANEG_CFG_ACK;
3184 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3185 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3186 tw32_f(MAC_MODE, tp->mac_mode);
3187 udelay(40);
3188
3189 ap->state = ANEG_STATE_ACK_DETECT;
3190
3191 /* fallthru */
3192 case ANEG_STATE_ACK_DETECT:
3193 if (ap->ack_match != 0) {
3194 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3195 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3196 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3197 } else {
3198 ap->state = ANEG_STATE_AN_ENABLE;
3199 }
3200 } else if (ap->ability_match != 0 &&
3201 ap->rxconfig == 0) {
3202 ap->state = ANEG_STATE_AN_ENABLE;
3203 }
3204 break;
3205
3206 case ANEG_STATE_COMPLETE_ACK_INIT:
3207 if (ap->rxconfig & ANEG_CFG_INVAL) {
3208 ret = ANEG_FAILED;
3209 break;
3210 }
3211 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3212 MR_LP_ADV_HALF_DUPLEX |
3213 MR_LP_ADV_SYM_PAUSE |
3214 MR_LP_ADV_ASYM_PAUSE |
3215 MR_LP_ADV_REMOTE_FAULT1 |
3216 MR_LP_ADV_REMOTE_FAULT2 |
3217 MR_LP_ADV_NEXT_PAGE |
3218 MR_TOGGLE_RX |
3219 MR_NP_RX);
3220 if (ap->rxconfig & ANEG_CFG_FD)
3221 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3222 if (ap->rxconfig & ANEG_CFG_HD)
3223 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3224 if (ap->rxconfig & ANEG_CFG_PS1)
3225 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3226 if (ap->rxconfig & ANEG_CFG_PS2)
3227 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3228 if (ap->rxconfig & ANEG_CFG_RF1)
3229 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3230 if (ap->rxconfig & ANEG_CFG_RF2)
3231 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3232 if (ap->rxconfig & ANEG_CFG_NP)
3233 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3234
3235 ap->link_time = ap->cur_time;
3236
3237 ap->flags ^= (MR_TOGGLE_TX);
3238 if (ap->rxconfig & 0x0008)
3239 ap->flags |= MR_TOGGLE_RX;
3240 if (ap->rxconfig & ANEG_CFG_NP)
3241 ap->flags |= MR_NP_RX;
3242 ap->flags |= MR_PAGE_RX;
3243
3244 ap->state = ANEG_STATE_COMPLETE_ACK;
3245 ret = ANEG_TIMER_ENAB;
3246 break;
3247
3248 case ANEG_STATE_COMPLETE_ACK:
3249 if (ap->ability_match != 0 &&
3250 ap->rxconfig == 0) {
3251 ap->state = ANEG_STATE_AN_ENABLE;
3252 break;
3253 }
3254 delta = ap->cur_time - ap->link_time;
3255 if (delta > ANEG_STATE_SETTLE_TIME) {
3256 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3257 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3258 } else {
3259 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3260 !(ap->flags & MR_NP_RX)) {
3261 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3262 } else {
3263 ret = ANEG_FAILED;
3264 }
3265 }
3266 }
3267 break;
3268
3269 case ANEG_STATE_IDLE_DETECT_INIT:
3270 ap->link_time = ap->cur_time;
3271 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3272 tw32_f(MAC_MODE, tp->mac_mode);
3273 udelay(40);
3274
3275 ap->state = ANEG_STATE_IDLE_DETECT;
3276 ret = ANEG_TIMER_ENAB;
3277 break;
3278
3279 case ANEG_STATE_IDLE_DETECT:
3280 if (ap->ability_match != 0 &&
3281 ap->rxconfig == 0) {
3282 ap->state = ANEG_STATE_AN_ENABLE;
3283 break;
3284 }
3285 delta = ap->cur_time - ap->link_time;
3286 if (delta > ANEG_STATE_SETTLE_TIME) {
3287 /* XXX another gem from the Broadcom driver :( */
3288 ap->state = ANEG_STATE_LINK_OK;
3289 }
3290 break;
3291
3292 case ANEG_STATE_LINK_OK:
3293 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3294 ret = ANEG_DONE;
3295 break;
3296
3297 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3298 /* ??? unimplemented */
3299 break;
3300
3301 case ANEG_STATE_NEXT_PAGE_WAIT:
3302 /* ??? unimplemented */
3303 break;
3304
3305 default:
3306 ret = ANEG_FAILED;
3307 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003308 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309
3310 return ret;
3311}
3312
Matt Carlson5be73b42007-12-20 20:09:29 -08003313static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314{
3315 int res = 0;
3316 struct tg3_fiber_aneginfo aninfo;
3317 int status = ANEG_FAILED;
3318 unsigned int tick;
3319 u32 tmp;
3320
3321 tw32_f(MAC_TX_AUTO_NEG, 0);
3322
3323 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3324 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3325 udelay(40);
3326
3327 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3328 udelay(40);
3329
3330 memset(&aninfo, 0, sizeof(aninfo));
3331 aninfo.flags |= MR_AN_ENABLE;
3332 aninfo.state = ANEG_STATE_UNKNOWN;
3333 aninfo.cur_time = 0;
3334 tick = 0;
3335 while (++tick < 195000) {
3336 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3337 if (status == ANEG_DONE || status == ANEG_FAILED)
3338 break;
3339
3340 udelay(1);
3341 }
3342
3343 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3344 tw32_f(MAC_MODE, tp->mac_mode);
3345 udelay(40);
3346
Matt Carlson5be73b42007-12-20 20:09:29 -08003347 *txflags = aninfo.txconfig;
3348 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349
3350 if (status == ANEG_DONE &&
3351 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3352 MR_LP_ADV_FULL_DUPLEX)))
3353 res = 1;
3354
3355 return res;
3356}
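/* Editor's note: the polling loop above allows up to 195000 passes through
 * the state machine with a udelay(1) between them, i.e. roughly 195 ms for
 * software autonegotiation to finish before fiber_autoneg() gives up.
 */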
3357
3358static void tg3_init_bcm8002(struct tg3 *tp)
3359{
3360 u32 mac_status = tr32(MAC_STATUS);
3361 int i;
3362
3363	/* Reset when initializing for the first time or when we have a link. */
3364 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3365 !(mac_status & MAC_STATUS_PCS_SYNCED))
3366 return;
3367
3368 /* Set PLL lock range. */
3369 tg3_writephy(tp, 0x16, 0x8007);
3370
3371 /* SW reset */
3372 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3373
3374 /* Wait for reset to complete. */
3375 /* XXX schedule_timeout() ... */
3376 for (i = 0; i < 500; i++)
3377 udelay(10);
3378
3379 /* Config mode; select PMA/Ch 1 regs. */
3380 tg3_writephy(tp, 0x10, 0x8411);
3381
3382 /* Enable auto-lock and comdet, select txclk for tx. */
3383 tg3_writephy(tp, 0x11, 0x0a10);
3384
3385 tg3_writephy(tp, 0x18, 0x00a0);
3386 tg3_writephy(tp, 0x16, 0x41ff);
3387
3388 /* Assert and deassert POR. */
3389 tg3_writephy(tp, 0x13, 0x0400);
3390 udelay(40);
3391 tg3_writephy(tp, 0x13, 0x0000);
3392
3393 tg3_writephy(tp, 0x11, 0x0a50);
3394 udelay(40);
3395 tg3_writephy(tp, 0x11, 0x0a10);
3396
3397 /* Wait for signal to stabilize */
3398 /* XXX schedule_timeout() ... */
3399 for (i = 0; i < 15000; i++)
3400 udelay(10);
3401
3402 /* Deselect the channel register so we can read the PHYID
3403 * later.
3404 */
3405 tg3_writephy(tp, 0x10, 0x8011);
3406}
3407
3408static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3409{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003410 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 u32 sg_dig_ctrl, sg_dig_status;
3412 u32 serdes_cfg, expected_sg_dig_ctrl;
3413 int workaround, port_a;
3414 int current_link_up;
3415
3416 serdes_cfg = 0;
3417 expected_sg_dig_ctrl = 0;
3418 workaround = 0;
3419 port_a = 1;
3420 current_link_up = 0;
3421
3422 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3423 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3424 workaround = 1;
3425 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3426 port_a = 0;
3427
3428 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3429 /* preserve bits 20-23 for voltage regulator */
3430 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3431 }
3432
3433 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3434
3435 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003436 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 if (workaround) {
3438 u32 val = serdes_cfg;
3439
3440 if (port_a)
3441 val |= 0xc010000;
3442 else
3443 val |= 0x4010000;
3444 tw32_f(MAC_SERDES_CFG, val);
3445 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003446
3447 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 }
3449 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3450 tg3_setup_flow_control(tp, 0, 0);
3451 current_link_up = 1;
3452 }
3453 goto out;
3454 }
3455
3456 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003457 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458
Matt Carlson82cd3d12007-12-20 20:09:00 -08003459 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3460 if (flowctrl & ADVERTISE_1000XPAUSE)
3461 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3462 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3463 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464
3465 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003466 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3467 tp->serdes_counter &&
3468 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3469 MAC_STATUS_RCVD_CFG)) ==
3470 MAC_STATUS_PCS_SYNCED)) {
3471 tp->serdes_counter--;
3472 current_link_up = 1;
3473 goto out;
3474 }
3475restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 if (workaround)
3477 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003478 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 udelay(5);
3480 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3481
Michael Chan3d3ebe72006-09-27 15:59:15 -07003482 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3483 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3485 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003486 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487 mac_status = tr32(MAC_STATUS);
3488
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003489 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003491 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492
Matt Carlson82cd3d12007-12-20 20:09:00 -08003493 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3494 local_adv |= ADVERTISE_1000XPAUSE;
3495 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3496 local_adv |= ADVERTISE_1000XPSE_ASYM;
3497
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003498 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003499 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003500 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003501 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
3503 tg3_setup_flow_control(tp, local_adv, remote_adv);
3504 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003505 tp->serdes_counter = 0;
3506 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003507 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003508 if (tp->serdes_counter)
3509 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 else {
3511 if (workaround) {
3512 u32 val = serdes_cfg;
3513
3514 if (port_a)
3515 val |= 0xc010000;
3516 else
3517 val |= 0x4010000;
3518
3519 tw32_f(MAC_SERDES_CFG, val);
3520 }
3521
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003522 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 udelay(40);
3524
3525				/* Link parallel detection - link is up
3526				 * only if we have PCS_SYNC and not
3527				 * receiving config code words. */
3528 mac_status = tr32(MAC_STATUS);
3529 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3530 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3531 tg3_setup_flow_control(tp, 0, 0);
3532 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003533 tp->tg3_flags2 |=
3534 TG3_FLG2_PARALLEL_DETECT;
3535 tp->serdes_counter =
3536 SERDES_PARALLEL_DET_TIMEOUT;
3537 } else
3538 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 }
3540 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003541 } else {
3542 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3543 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 }
3545
3546out:
3547 return current_link_up;
3548}
3549
3550static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3551{
3552 int current_link_up = 0;
3553
Michael Chan5cf64b82007-05-05 12:11:21 -07003554 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556
3557 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003558 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003560
Matt Carlson5be73b42007-12-20 20:09:29 -08003561 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3562 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563
Matt Carlson5be73b42007-12-20 20:09:29 -08003564 if (txflags & ANEG_CFG_PS1)
3565 local_adv |= ADVERTISE_1000XPAUSE;
3566 if (txflags & ANEG_CFG_PS2)
3567 local_adv |= ADVERTISE_1000XPSE_ASYM;
3568
3569 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3570 remote_adv |= LPA_1000XPAUSE;
3571 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3572 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573
3574 tg3_setup_flow_control(tp, local_adv, remote_adv);
3575
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 current_link_up = 1;
3577 }
3578 for (i = 0; i < 30; i++) {
3579 udelay(20);
3580 tw32_f(MAC_STATUS,
3581 (MAC_STATUS_SYNC_CHANGED |
3582 MAC_STATUS_CFG_CHANGED));
3583 udelay(40);
3584 if ((tr32(MAC_STATUS) &
3585 (MAC_STATUS_SYNC_CHANGED |
3586 MAC_STATUS_CFG_CHANGED)) == 0)
3587 break;
3588 }
3589
3590 mac_status = tr32(MAC_STATUS);
3591 if (current_link_up == 0 &&
3592 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3593 !(mac_status & MAC_STATUS_RCVD_CFG))
3594 current_link_up = 1;
3595 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003596 tg3_setup_flow_control(tp, 0, 0);
3597
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 /* Forcing 1000FD link up. */
3599 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600
3601 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3602 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003603
3604 tw32_f(MAC_MODE, tp->mac_mode);
3605 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 }
3607
3608out:
3609 return current_link_up;
3610}
3611
3612static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3613{
3614 u32 orig_pause_cfg;
3615 u16 orig_active_speed;
3616 u8 orig_active_duplex;
3617 u32 mac_status;
3618 int current_link_up;
3619 int i;
3620
Matt Carlson8d018622007-12-20 20:05:44 -08003621 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622 orig_active_speed = tp->link_config.active_speed;
3623 orig_active_duplex = tp->link_config.active_duplex;
3624
3625 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3626 netif_carrier_ok(tp->dev) &&
3627 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3628 mac_status = tr32(MAC_STATUS);
3629 mac_status &= (MAC_STATUS_PCS_SYNCED |
3630 MAC_STATUS_SIGNAL_DET |
3631 MAC_STATUS_CFG_CHANGED |
3632 MAC_STATUS_RCVD_CFG);
3633 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3634 MAC_STATUS_SIGNAL_DET)) {
3635 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3636 MAC_STATUS_CFG_CHANGED));
3637 return 0;
3638 }
3639 }
3640
3641 tw32_f(MAC_TX_AUTO_NEG, 0);
3642
3643 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3644 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3645 tw32_f(MAC_MODE, tp->mac_mode);
3646 udelay(40);
3647
3648 if (tp->phy_id == PHY_ID_BCM8002)
3649 tg3_init_bcm8002(tp);
3650
3651 /* Enable link change event even when serdes polling. */
3652 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3653 udelay(40);
3654
3655 current_link_up = 0;
3656 mac_status = tr32(MAC_STATUS);
3657
3658 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3659 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3660 else
3661 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3662
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 tp->hw_status->status =
3664 (SD_STATUS_UPDATED |
3665 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3666
3667 for (i = 0; i < 100; i++) {
3668 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3669 MAC_STATUS_CFG_CHANGED));
3670 udelay(5);
3671 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003672 MAC_STATUS_CFG_CHANGED |
3673 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 break;
3675 }
3676
3677 mac_status = tr32(MAC_STATUS);
3678 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3679 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003680 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3681 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 tw32_f(MAC_MODE, (tp->mac_mode |
3683 MAC_MODE_SEND_CONFIGS));
3684 udelay(1);
3685 tw32_f(MAC_MODE, tp->mac_mode);
3686 }
3687 }
3688
3689 if (current_link_up == 1) {
3690 tp->link_config.active_speed = SPEED_1000;
3691 tp->link_config.active_duplex = DUPLEX_FULL;
3692 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3693 LED_CTRL_LNKLED_OVERRIDE |
3694 LED_CTRL_1000MBPS_ON));
3695 } else {
3696 tp->link_config.active_speed = SPEED_INVALID;
3697 tp->link_config.active_duplex = DUPLEX_INVALID;
3698 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3699 LED_CTRL_LNKLED_OVERRIDE |
3700 LED_CTRL_TRAFFIC_OVERRIDE));
3701 }
3702
3703 if (current_link_up != netif_carrier_ok(tp->dev)) {
3704 if (current_link_up)
3705 netif_carrier_on(tp->dev);
3706 else
3707 netif_carrier_off(tp->dev);
3708 tg3_link_report(tp);
3709 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003710 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711 if (orig_pause_cfg != now_pause_cfg ||
3712 orig_active_speed != tp->link_config.active_speed ||
3713 orig_active_duplex != tp->link_config.active_duplex)
3714 tg3_link_report(tp);
3715 }
3716
3717 return 0;
3718}
3719
Michael Chan747e8f82005-07-25 12:33:22 -07003720static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3721{
3722 int current_link_up, err = 0;
3723 u32 bmsr, bmcr;
3724 u16 current_speed;
3725 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003726 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003727
3728 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3729 tw32_f(MAC_MODE, tp->mac_mode);
3730 udelay(40);
3731
3732 tw32(MAC_EVENT, 0);
3733
3734 tw32_f(MAC_STATUS,
3735 (MAC_STATUS_SYNC_CHANGED |
3736 MAC_STATUS_CFG_CHANGED |
3737 MAC_STATUS_MI_COMPLETION |
3738 MAC_STATUS_LNKSTATE_CHANGED));
3739 udelay(40);
3740
3741 if (force_reset)
3742 tg3_phy_reset(tp);
3743
3744 current_link_up = 0;
3745 current_speed = SPEED_INVALID;
3746 current_duplex = DUPLEX_INVALID;
3747
3748 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3749 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3751 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3752 bmsr |= BMSR_LSTATUS;
3753 else
3754 bmsr &= ~BMSR_LSTATUS;
3755 }
Michael Chan747e8f82005-07-25 12:33:22 -07003756
3757 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3758
3759 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003760 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003761 /* do nothing, just check for link up at the end */
3762 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3763 u32 adv, new_adv;
3764
3765 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3766 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3767 ADVERTISE_1000XPAUSE |
3768 ADVERTISE_1000XPSE_ASYM |
3769 ADVERTISE_SLCT);
3770
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003771 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003772
3773 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3774 new_adv |= ADVERTISE_1000XHALF;
3775 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3776 new_adv |= ADVERTISE_1000XFULL;
3777
3778 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3779 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3780 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3781 tg3_writephy(tp, MII_BMCR, bmcr);
3782
3783 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003784 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003785 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3786
3787 return err;
3788 }
3789 } else {
3790 u32 new_bmcr;
3791
3792 bmcr &= ~BMCR_SPEED1000;
3793 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3794
3795 if (tp->link_config.duplex == DUPLEX_FULL)
3796 new_bmcr |= BMCR_FULLDPLX;
3797
3798 if (new_bmcr != bmcr) {
3799 /* BMCR_SPEED1000 is a reserved bit that needs
3800 * to be set on write.
3801 */
3802 new_bmcr |= BMCR_SPEED1000;
3803
3804 /* Force a linkdown */
3805 if (netif_carrier_ok(tp->dev)) {
3806 u32 adv;
3807
3808 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3809 adv &= ~(ADVERTISE_1000XFULL |
3810 ADVERTISE_1000XHALF |
3811 ADVERTISE_SLCT);
3812 tg3_writephy(tp, MII_ADVERTISE, adv);
3813 tg3_writephy(tp, MII_BMCR, bmcr |
3814 BMCR_ANRESTART |
3815 BMCR_ANENABLE);
3816 udelay(10);
3817 netif_carrier_off(tp->dev);
3818 }
3819 tg3_writephy(tp, MII_BMCR, new_bmcr);
3820 bmcr = new_bmcr;
3821 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3822 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003823 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3824 ASIC_REV_5714) {
3825 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3826 bmsr |= BMSR_LSTATUS;
3827 else
3828 bmsr &= ~BMSR_LSTATUS;
3829 }
Michael Chan747e8f82005-07-25 12:33:22 -07003830 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3831 }
3832 }
3833
3834 if (bmsr & BMSR_LSTATUS) {
3835 current_speed = SPEED_1000;
3836 current_link_up = 1;
3837 if (bmcr & BMCR_FULLDPLX)
3838 current_duplex = DUPLEX_FULL;
3839 else
3840 current_duplex = DUPLEX_HALF;
3841
Matt Carlsonef167e22007-12-20 20:10:01 -08003842 local_adv = 0;
3843 remote_adv = 0;
3844
Michael Chan747e8f82005-07-25 12:33:22 -07003845 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003846 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003847
3848 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3849 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3850 common = local_adv & remote_adv;
3851 if (common & (ADVERTISE_1000XHALF |
3852 ADVERTISE_1000XFULL)) {
3853 if (common & ADVERTISE_1000XFULL)
3854 current_duplex = DUPLEX_FULL;
3855 else
3856 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003857 }
3858 else
3859 current_link_up = 0;
3860 }
3861 }
3862
Matt Carlsonef167e22007-12-20 20:10:01 -08003863 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3864 tg3_setup_flow_control(tp, local_adv, remote_adv);
3865
Michael Chan747e8f82005-07-25 12:33:22 -07003866 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3867 if (tp->link_config.active_duplex == DUPLEX_HALF)
3868 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3869
3870 tw32_f(MAC_MODE, tp->mac_mode);
3871 udelay(40);
3872
3873 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3874
3875 tp->link_config.active_speed = current_speed;
3876 tp->link_config.active_duplex = current_duplex;
3877
3878 if (current_link_up != netif_carrier_ok(tp->dev)) {
3879 if (current_link_up)
3880 netif_carrier_on(tp->dev);
3881 else {
3882 netif_carrier_off(tp->dev);
3883 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3884 }
3885 tg3_link_report(tp);
3886 }
3887 return err;
3888}
3889
3890static void tg3_serdes_parallel_detect(struct tg3 *tp)
3891{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003892 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003893 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003894 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003895 return;
3896 }
3897 if (!netif_carrier_ok(tp->dev) &&
3898 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3899 u32 bmcr;
3900
3901 tg3_readphy(tp, MII_BMCR, &bmcr);
3902 if (bmcr & BMCR_ANENABLE) {
3903 u32 phy1, phy2;
3904
3905 /* Select shadow register 0x1f */
3906 tg3_writephy(tp, 0x1c, 0x7c00);
3907 tg3_readphy(tp, 0x1c, &phy1);
3908
3909 /* Select expansion interrupt status register */
3910 tg3_writephy(tp, 0x17, 0x0f01);
3911 tg3_readphy(tp, 0x15, &phy2);
3912 tg3_readphy(tp, 0x15, &phy2);
3913
3914 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3915 /* We have signal detect and not receiving
3916				 * config code words; link is up by parallel
3917 * detection.
3918 */
3919
3920 bmcr &= ~BMCR_ANENABLE;
3921 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3922 tg3_writephy(tp, MII_BMCR, bmcr);
3923 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3924 }
3925 }
3926 }
3927 else if (netif_carrier_ok(tp->dev) &&
3928 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3929 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3930 u32 phy2;
3931
3932 /* Select expansion interrupt status register */
3933 tg3_writephy(tp, 0x17, 0x0f01);
3934 tg3_readphy(tp, 0x15, &phy2);
3935 if (phy2 & 0x20) {
3936 u32 bmcr;
3937
3938 /* Config code words received, turn on autoneg. */
3939 tg3_readphy(tp, MII_BMCR, &bmcr);
3940 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3941
3942 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3943
3944 }
3945 }
3946}
3947
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3949{
3950 int err;
3951
3952 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3953 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003954 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3955 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 } else {
3957 err = tg3_setup_copper_phy(tp, force_reset);
3958 }
3959
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003960 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003961 u32 val, scale;
3962
3963 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3964 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3965 scale = 65;
3966 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3967 scale = 6;
3968 else
3969 scale = 12;
3970
3971 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3972 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3973 tw32(GRC_MISC_CFG, val);
3974 }
3975
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976 if (tp->link_config.active_speed == SPEED_1000 &&
3977 tp->link_config.active_duplex == DUPLEX_HALF)
3978 tw32(MAC_TX_LENGTHS,
3979 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3980 (6 << TX_LENGTHS_IPG_SHIFT) |
3981 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3982 else
3983 tw32(MAC_TX_LENGTHS,
3984 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3985 (6 << TX_LENGTHS_IPG_SHIFT) |
3986 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3987
3988 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3989 if (netif_carrier_ok(tp->dev)) {
3990 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003991 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 } else {
3993 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3994 }
3995 }
3996
Matt Carlson8ed5d972007-05-07 00:25:49 -07003997 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3998 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3999 if (!netif_carrier_ok(tp->dev))
4000 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4001 tp->pwrmgmt_thresh;
4002 else
4003 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4004 tw32(PCIE_PWR_MGMT_THRESH, val);
4005 }
4006
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 return err;
4008}
4009
Michael Chandf3e6542006-05-26 17:48:07 -07004010/* This is called whenever we suspect that the system chipset is re-
4011 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4012 * is bogus tx completions. We try to recover by setting the
4013 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4014 * in the workqueue.
4015 */
4016static void tg3_tx_recover(struct tg3 *tp)
4017{
4018 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4019 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4020
4021 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4022 "mapped I/O cycles to the network device, attempting to "
4023 "recover. Please report the problem to the driver maintainer "
4024 "and include system chipset information.\n", tp->dev->name);
4025
4026 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07004027 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07004028 spin_unlock(&tp->lock);
4029}
4030
Michael Chan1b2a7202006-08-07 21:46:02 -07004031static inline u32 tg3_tx_avail(struct tg3 *tp)
4032{
4033 smp_mb();
4034 return (tp->tx_pending -
4035 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4036}
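/* Editor's note (worked example, assuming TG3_TX_RING_SIZE is 512 as
 * defined in tg3.h): with tx_prod == 10 and tx_cons == 500,
 * (10 - 500) & 511 == 22 descriptors are in flight, so with
 * tx_pending == 512 tg3_tx_avail() reports 512 - 22 == 490 free slots.
 */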
4037
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038/* Tigon3 never reports partial packet sends. So we do not
4039 * need special logic to handle SKBs that have not had all
4040 * of their frags sent yet, like SunGEM does.
4041 */
4042static void tg3_tx(struct tg3 *tp)
4043{
4044 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
4045 u32 sw_idx = tp->tx_cons;
4046
4047 while (sw_idx != hw_idx) {
4048 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
4049 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07004050 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051
Michael Chandf3e6542006-05-26 17:48:07 -07004052 if (unlikely(skb == NULL)) {
4053 tg3_tx_recover(tp);
4054 return;
4055 }
4056
David S. Miller90079ce2008-09-11 04:52:51 -07004057 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
4059 ri->skb = NULL;
4060
4061 sw_idx = NEXT_TX(sw_idx);
4062
4063 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07004065 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4066 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 sw_idx = NEXT_TX(sw_idx);
4068 }
4069
David S. Millerf47c11e2005-06-24 20:18:35 -07004070 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07004071
4072 if (unlikely(tx_bug)) {
4073 tg3_tx_recover(tp);
4074 return;
4075 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 }
4077
4078 tp->tx_cons = sw_idx;
4079
Michael Chan1b2a7202006-08-07 21:46:02 -07004080 /* Need to make the tx_cons update visible to tg3_start_xmit()
4081 * before checking for netif_queue_stopped(). Without the
4082 * memory barrier, there is a small possibility that tg3_start_xmit()
4083 * will miss it and cause the queue to be stopped forever.
4084 */
4085 smp_mb();
4086
4087 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004088 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07004089 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004090 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004091 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07004092 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07004093 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004094 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095}
4096
4097/* Returns size of skb allocated or < 0 on error.
4098 *
4099 * We only need to fill in the address because the other members
4100 * of the RX descriptor are invariant, see tg3_init_rings.
4101 *
4102 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4103 * posting buffers we only dirty the first cache line of the RX
4104 * descriptor (containing the address). Whereas for the RX status
4105 * buffers the cpu only reads the last cacheline of the RX descriptor
4106 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4107 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
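/* Editor's note (hedged, based on the struct tg3_rx_buffer_desc layout in
 * tg3.h): the addr_hi/addr_lo words sit at the front of the descriptor and
 * the err_vlan/opaque words at the back, which is what allows posting to
 * dirty only the first cache line while status processing reads only the
 * last one, as described above.
 */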
4108static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4109 int src_idx, u32 dest_idx_unmasked)
4110{
4111 struct tg3_rx_buffer_desc *desc;
4112 struct ring_info *map, *src_map;
4113 struct sk_buff *skb;
4114 dma_addr_t mapping;
4115 int skb_size, dest_idx;
4116
4117 src_map = NULL;
4118 switch (opaque_key) {
4119 case RXD_OPAQUE_RING_STD:
4120 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4121 desc = &tp->rx_std[dest_idx];
4122 map = &tp->rx_std_buffers[dest_idx];
4123 if (src_idx >= 0)
4124 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07004125 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126 break;
4127
4128 case RXD_OPAQUE_RING_JUMBO:
4129 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4130 desc = &tp->rx_jumbo[dest_idx];
4131 map = &tp->rx_jumbo_buffers[dest_idx];
4132 if (src_idx >= 0)
4133 src_map = &tp->rx_jumbo_buffers[src_idx];
4134 skb_size = RX_JUMBO_PKT_BUF_SZ;
4135 break;
4136
4137 default:
4138 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004139 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140
4141 /* Do not overwrite any of the map or rp information
4142 * until we are sure we can commit to a new buffer.
4143 *
4144 * Callers depend upon this behavior and assume that
4145 * we leave everything unchanged if we fail.
4146 */
David S. Millera20e9c62006-07-31 22:38:16 -07004147 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148 if (skb == NULL)
4149 return -ENOMEM;
4150
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 skb_reserve(skb, tp->rx_offset);
4152
4153 mapping = pci_map_single(tp->pdev, skb->data,
4154 skb_size - tp->rx_offset,
4155 PCI_DMA_FROMDEVICE);
4156
4157 map->skb = skb;
4158 pci_unmap_addr_set(map, mapping, mapping);
4159
4160 if (src_map != NULL)
4161 src_map->skb = NULL;
4162
4163 desc->addr_hi = ((u64)mapping >> 32);
4164 desc->addr_lo = ((u64)mapping & 0xffffffff);
4165
4166 return skb_size;
4167}
4168
4169/* We only need to move over the address because the other
4170 * members of the RX descriptor are invariant. See notes above
4171 * tg3_alloc_rx_skb for full details.
4172 */
4173static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4174 int src_idx, u32 dest_idx_unmasked)
4175{
4176 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4177 struct ring_info *src_map, *dest_map;
4178 int dest_idx;
4179
4180 switch (opaque_key) {
4181 case RXD_OPAQUE_RING_STD:
4182 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4183 dest_desc = &tp->rx_std[dest_idx];
4184 dest_map = &tp->rx_std_buffers[dest_idx];
4185 src_desc = &tp->rx_std[src_idx];
4186 src_map = &tp->rx_std_buffers[src_idx];
4187 break;
4188
4189 case RXD_OPAQUE_RING_JUMBO:
4190 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4191 dest_desc = &tp->rx_jumbo[dest_idx];
4192 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4193 src_desc = &tp->rx_jumbo[src_idx];
4194 src_map = &tp->rx_jumbo_buffers[src_idx];
4195 break;
4196
4197 default:
4198 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004199 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200
4201 dest_map->skb = src_map->skb;
4202 pci_unmap_addr_set(dest_map, mapping,
4203 pci_unmap_addr(src_map, mapping));
4204 dest_desc->addr_hi = src_desc->addr_hi;
4205 dest_desc->addr_lo = src_desc->addr_lo;
4206
4207 src_map->skb = NULL;
4208}
4209
4210#if TG3_VLAN_TAG_USED
4211static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4212{
4213 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4214}
4215#endif
4216
4217/* The RX ring scheme is composed of multiple rings which post fresh
4218 * buffers to the chip, and one special ring the chip uses to report
4219 * status back to the host.
4220 *
4221 * The special ring reports the status of received packets to the
4222 * host. The chip does not write into the original descriptor the
4223 * RX buffer was obtained from. The chip simply takes the original
4224 * descriptor as provided by the host, updates the status and length
4225 * field, then writes this into the next status ring entry.
4226 *
4227 * Each ring the host uses to post buffers to the chip is described
4228 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4229 * it is first placed into the on-chip RAM. When the packet's length
4230 * is known, it walks down the TG3_BDINFO entries to select the ring.
4231 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4232 * whose MAXLEN covers the new packet's length is chosen.
4233 *
4234 * The "separate ring for rx status" scheme may sound queer, but it makes
4235 * sense from a cache coherency perspective. If only the host writes
4236 * to the buffer post rings, and only the chip writes to the rx status
4237 * rings, then cache lines never move beyond shared-modified state.
4238 * If both the host and chip were to write into the same ring, cache line
4239 * eviction could occur since both entities want it in an exclusive state.
4240 * eviction could occur since both entities want it in an exclusive state.
 */
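/* Editor's note (illustrative sketch of the flow described above):
 *
 *   host --posts buffers--> rx_std[] / rx_jumbo[] --(TG3_BDINFO)--> chip
 *   chip --writes status--> rx_rcb[] --(opaque cookie + length)--> host
 *
 * tg3_rx() below consumes rx_rcb[] entries from tp->rx_rcb_ptr up to the
 * hardware producer index and uses the opaque cookie to find the buffer
 * that was originally posted.
 */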
4241static int tg3_rx(struct tg3 *tp, int budget)
4242{
Michael Chanf92905d2006-06-29 20:14:29 -07004243 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004244 u32 sw_idx = tp->rx_rcb_ptr;
4245 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246 int received;
4247
4248 hw_idx = tp->hw_status->idx[0].rx_producer;
4249 /*
4250 * We need to order the read of hw_idx and the read of
4251 * the opaque cookie.
4252 */
4253 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 work_mask = 0;
4255 received = 0;
4256 while (sw_idx != hw_idx && budget > 0) {
4257 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4258 unsigned int len;
4259 struct sk_buff *skb;
4260 dma_addr_t dma_addr;
4261 u32 opaque_key, desc_idx, *post_ptr;
4262
4263 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4264 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4265 if (opaque_key == RXD_OPAQUE_RING_STD) {
4266 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4267 mapping);
4268 skb = tp->rx_std_buffers[desc_idx].skb;
4269 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004270 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004271 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4272 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4273 mapping);
4274 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4275 post_ptr = &tp->rx_jumbo_ptr;
4276 }
4277 else {
4278 goto next_pkt_nopost;
4279 }
4280
4281 work_mask |= opaque_key;
4282
4283 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4284 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4285 drop_it:
4286 tg3_recycle_rx(tp, opaque_key,
4287 desc_idx, *post_ptr);
4288 drop_it_no_recycle:
4289			/* Other statistics are kept track of by the card. */
4290 tp->net_stats.rx_dropped++;
4291 goto next_pkt;
4292 }
4293
Matt Carlsonad829262008-11-21 17:16:16 -08004294 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4295 ETH_FCS_LEN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004297 if (len > RX_COPY_THRESHOLD
Matt Carlsonad829262008-11-21 17:16:16 -08004298 && tp->rx_offset == NET_IP_ALIGN
4299 /* rx_offset will likely not equal NET_IP_ALIGN
4300 * if this is a 5701 card running in PCI-X mode
4301 * [see tg3_get_invariants()]
4302 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303 ) {
4304 int skb_size;
4305
4306 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4307 desc_idx, *post_ptr);
4308 if (skb_size < 0)
4309 goto drop_it;
4310
4311 pci_unmap_single(tp->pdev, dma_addr,
4312 skb_size - tp->rx_offset,
4313 PCI_DMA_FROMDEVICE);
4314
4315 skb_put(skb, len);
4316 } else {
4317 struct sk_buff *copy_skb;
4318
4319 tg3_recycle_rx(tp, opaque_key,
4320 desc_idx, *post_ptr);
4321
Matt Carlsonad829262008-11-21 17:16:16 -08004322 copy_skb = netdev_alloc_skb(tp->dev,
4323 len + TG3_RAW_IP_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 if (copy_skb == NULL)
4325 goto drop_it_no_recycle;
4326
Matt Carlsonad829262008-11-21 17:16:16 -08004327 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 skb_put(copy_skb, len);
4329 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004330 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4332
4333 /* We'll reuse the original ring buffer. */
4334 skb = copy_skb;
4335 }
4336
4337 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4338 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4339 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4340 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4341 skb->ip_summed = CHECKSUM_UNNECESSARY;
4342 else
4343 skb->ip_summed = CHECKSUM_NONE;
4344
4345 skb->protocol = eth_type_trans(skb, tp->dev);
4346#if TG3_VLAN_TAG_USED
4347 if (tp->vlgrp != NULL &&
4348 desc->type_flags & RXD_FLAG_VLAN) {
4349 tg3_vlan_rx(tp, skb,
4350 desc->err_vlan & RXD_VLAN_MASK);
4351 } else
4352#endif
4353 netif_receive_skb(skb);
4354
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 received++;
4356 budget--;
4357
4358next_pkt:
4359 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004360
4361 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4362 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4363
4364 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4365 TG3_64BIT_REG_LOW, idx);
4366 work_mask &= ~RXD_OPAQUE_RING_STD;
4367 rx_std_posted = 0;
4368 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004370 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004371 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004372
4373 /* Refresh hw_idx to see if there is new work */
4374 if (sw_idx == hw_idx) {
4375 hw_idx = tp->hw_status->idx[0].rx_producer;
4376 rmb();
4377 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378 }
4379
4380 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004381 tp->rx_rcb_ptr = sw_idx;
4382 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383
4384 /* Refill RX ring(s). */
4385 if (work_mask & RXD_OPAQUE_RING_STD) {
4386 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4387 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4388 sw_idx);
4389 }
4390 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4391 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4392 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4393 sw_idx);
4394 }
4395 mmiowb();
4396
4397 return received;
4398}
4399
David S. Miller6f535762007-10-11 18:08:29 -07004400static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 /* handle link change and other phy events */
4405 if (!(tp->tg3_flags &
4406 (TG3_FLAG_USE_LINKCHG_REG |
4407 TG3_FLAG_POLL_SERDES))) {
4408 if (sblk->status & SD_STATUS_LINK_CHG) {
4409 sblk->status = SD_STATUS_UPDATED |
4410 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004411 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004412 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4413 tw32_f(MAC_STATUS,
4414 (MAC_STATUS_SYNC_CHANGED |
4415 MAC_STATUS_CFG_CHANGED |
4416 MAC_STATUS_MI_COMPLETION |
4417 MAC_STATUS_LNKSTATE_CHANGED));
4418 udelay(40);
4419 } else
4420 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004421 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422 }
4423 }
4424
4425 /* run TX completion thread */
4426 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004428 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004429 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004430 }
4431
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432 /* run RX thread, within the bounds set by NAPI.
4433 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004434 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004436 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004437 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438
David S. Miller6f535762007-10-11 18:08:29 -07004439 return work_done;
4440}
David S. Millerf7383c22005-05-18 22:50:53 -07004441
David S. Miller6f535762007-10-11 18:08:29 -07004442static int tg3_poll(struct napi_struct *napi, int budget)
4443{
4444 struct tg3 *tp = container_of(napi, struct tg3, napi);
4445 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004446 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004447
4448 while (1) {
4449 work_done = tg3_poll_work(tp, work_done, budget);
4450
4451 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4452 goto tx_recovery;
4453
4454 if (unlikely(work_done >= budget))
4455 break;
4456
Michael Chan4fd7ab52007-10-12 01:39:50 -07004457 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4458 /* tp->last_tag is used in tg3_restart_ints() below
4459 * to tell the hw how much work has been processed,
4460 * so we must read it before checking for more work.
4461 */
4462 tp->last_tag = sblk->status_tag;
4463 rmb();
4464 } else
4465 sblk->status &= ~SD_STATUS_UPDATED;
4466
David S. Miller6f535762007-10-11 18:08:29 -07004467 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004468 netif_rx_complete(tp->dev, napi);
4469 tg3_restart_ints(tp);
4470 break;
4471 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 }
4473
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004474 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004475
4476tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004477 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004478 netif_rx_complete(tp->dev, napi);
4479 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004480 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481}
4482
David S. Millerf47c11e2005-06-24 20:18:35 -07004483static void tg3_irq_quiesce(struct tg3 *tp)
4484{
4485 BUG_ON(tp->irq_sync);
4486
4487 tp->irq_sync = 1;
4488 smp_mb();
4489
4490 synchronize_irq(tp->pdev->irq);
4491}
4492
4493static inline int tg3_irq_sync(struct tg3 *tp)
4494{
4495 return tp->irq_sync;
4496}
4497
4498/* Fully shutdown all tg3 driver activity elsewhere in the system.
4499 * If irq_sync is non-zero, then the IRQ handler is quiesced as well.
4500 * Most of the time this is not necessary, except when
4501 * shutting down the device.
4502 */
4503static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4504{
Michael Chan46966542007-07-11 19:47:19 -07004505 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004506 if (irq_sync)
4507 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004508}
4509
4510static inline void tg3_full_unlock(struct tg3 *tp)
4511{
David S. Millerf47c11e2005-06-24 20:18:35 -07004512 spin_unlock_bh(&tp->lock);
4513}
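/* Editor's note (illustrative usage, not from the original source):
 *
 *	tg3_full_lock(tp, 1);	// irq_sync != 0 also quiesces the IRQ handler
 *	... reconfigure or halt the hardware ...
 *	tg3_full_unlock(tp);
 *
 * Passing irq_sync == 0 takes only the spinlock, as the fast paths do.
 */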
4514
Michael Chanfcfa0a32006-03-20 22:28:41 -08004515/* One-shot MSI handler - Chip automatically disables interrupt
4516 * after sending MSI so driver doesn't have to do it.
4517 */
David Howells7d12e782006-10-05 14:55:46 +01004518static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004519{
4520 struct net_device *dev = dev_id;
4521 struct tg3 *tp = netdev_priv(dev);
4522
4523 prefetch(tp->hw_status);
4524 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4525
4526 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004527 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004528
4529 return IRQ_HANDLED;
4530}
4531
Michael Chan88b06bc2005-04-21 17:13:25 -07004532/* MSI ISR - No need to check for interrupt sharing and no need to
4533 * flush status block and interrupt mailbox. PCI ordering rules
4534 * guarantee that MSI will arrive after the status block.
4535 */
David Howells7d12e782006-10-05 14:55:46 +01004536static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004537{
4538 struct net_device *dev = dev_id;
4539 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004540
Michael Chan61487482005-09-05 17:53:19 -07004541 prefetch(tp->hw_status);
4542 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004543 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004544 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004545 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004546	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004547 * NIC to stop sending us irqs, engaging "in-intr-handler"
4548 * event coalescing.
4549 */
4550 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004551 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004552 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004553
Michael Chan88b06bc2005-04-21 17:13:25 -07004554 return IRQ_RETVAL(1);
4555}
4556
David Howells7d12e782006-10-05 14:55:46 +01004557static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558{
4559 struct net_device *dev = dev_id;
4560 struct tg3 *tp = netdev_priv(dev);
4561 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004562 unsigned int handled = 1;
4563
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 /* In INTx mode, it is possible for the interrupt to arrive at
4565	 * the CPU before the status block that was posted prior to the interrupt.
4566 * Reading the PCI State register will confirm whether the
4567 * interrupt is ours and will flush the status block.
4568 */
Michael Chand18edcb2007-03-24 20:57:11 -07004569 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4570 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4571 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4572 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004573 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004574 }
Michael Chand18edcb2007-03-24 20:57:11 -07004575 }
4576
4577 /*
4578 * Writing any value to intr-mbox-0 clears PCI INTA# and
4579 * chip-internal interrupt pending events.
4580	 * Writing non-zero to intr-mbox-0 additionally tells the
4581 * NIC to stop sending us irqs, engaging "in-intr-handler"
4582 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004583 *
4584 * Flush the mailbox to de-assert the IRQ immediately to prevent
4585 * spurious interrupts. The flush impacts performance but
4586 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004587 */
Michael Chanc04cb342007-05-07 00:26:15 -07004588 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004589 if (tg3_irq_sync(tp))
4590 goto out;
4591 sblk->status &= ~SD_STATUS_UPDATED;
4592 if (likely(tg3_has_work(tp))) {
4593 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004594 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004595 } else {
4596 /* No work, shared interrupt perhaps? re-enable
4597 * interrupts, and flush that PCI write
4598 */
4599 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4600 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004601 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004602out:
David S. Millerfac9b832005-05-18 22:46:34 -07004603 return IRQ_RETVAL(handled);
4604}
4605
David Howells7d12e782006-10-05 14:55:46 +01004606static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004607{
4608 struct net_device *dev = dev_id;
4609 struct tg3 *tp = netdev_priv(dev);
4610 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004611 unsigned int handled = 1;
4612
David S. Millerfac9b832005-05-18 22:46:34 -07004613 /* In INTx mode, it is possible for the interrupt to arrive at
4614 * the CPU before the status block posted prior to the interrupt.
4615	 * the CPU before the status block that was posted prior to the interrupt.
4616 * interrupt is ours and will flush the status block.
4617 */
Michael Chand18edcb2007-03-24 20:57:11 -07004618 if (unlikely(sblk->status_tag == tp->last_tag)) {
4619 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4620 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4621 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004622 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 }
Michael Chand18edcb2007-03-24 20:57:11 -07004624 }
4625
4626 /*
4627 * writing any value to intr-mbox-0 clears PCI INTA# and
4628 * chip-internal interrupt pending events.
4629	 * writing non-zero to intr-mbox-0 additionally tells the
4630 * NIC to stop sending us irqs, engaging "in-intr-handler"
4631 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004632 *
4633 * Flush the mailbox to de-assert the IRQ immediately to prevent
4634 * spurious interrupts. The flush impacts performance but
4635 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004636 */
Michael Chanc04cb342007-05-07 00:26:15 -07004637 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004638 if (tg3_irq_sync(tp))
4639 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004640 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004641 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4642 /* Update last_tag to mark that this status has been
4643 * seen. Because interrupt may be shared, we may be
4644 * racing with tg3_poll(), so only update last_tag
4645 * if tg3_poll() is not scheduled.
4646 */
4647 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004648 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004650out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 return IRQ_RETVAL(handled);
4652}
4653
Michael Chan79381092005-04-21 17:13:59 -07004654/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004655static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004656{
4657 struct net_device *dev = dev_id;
4658 struct tg3 *tp = netdev_priv(dev);
4659 struct tg3_hw_status *sblk = tp->hw_status;
4660
Michael Chanf9804dd2005-09-27 12:13:10 -07004661 if ((sblk->status & SD_STATUS_UPDATED) ||
4662 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004663 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004664 return IRQ_RETVAL(1);
4665 }
4666 return IRQ_RETVAL(0);
4667}
4668
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004669static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004670static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671
Michael Chanb9ec6c12006-07-25 16:37:27 -07004672/* Restart hardware after configuration changes, self-test, etc.
4673 * Invoked with tp->lock held.
4674 */
4675static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004676 __releases(tp->lock)
4677 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004678{
4679 int err;
4680
4681 err = tg3_init_hw(tp, reset_phy);
4682 if (err) {
4683 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4684 "aborting.\n", tp->dev->name);
4685 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4686 tg3_full_unlock(tp);
4687 del_timer_sync(&tp->timer);
4688 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004689 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004690 dev_close(tp->dev);
4691 tg3_full_lock(tp, 0);
4692 }
4693 return err;
4694}
4695
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696#ifdef CONFIG_NET_POLL_CONTROLLER
4697static void tg3_poll_controller(struct net_device *dev)
4698{
Michael Chan88b06bc2005-04-21 17:13:25 -07004699 struct tg3 *tp = netdev_priv(dev);
4700
David Howells7d12e782006-10-05 14:55:46 +01004701 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702}
4703#endif
4704
David Howellsc4028952006-11-22 14:57:56 +00004705static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706{
David Howellsc4028952006-11-22 14:57:56 +00004707 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004708 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709 unsigned int restart_timer;
4710
Michael Chan7faa0062006-02-02 17:29:28 -08004711 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004712
4713 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004714 tg3_full_unlock(tp);
4715 return;
4716 }
4717
4718 tg3_full_unlock(tp);
4719
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004720 tg3_phy_stop(tp);
4721
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722 tg3_netif_stop(tp);
4723
David S. Millerf47c11e2005-06-24 20:18:35 -07004724 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725
4726 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4727 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4728
Michael Chandf3e6542006-05-26 17:48:07 -07004729 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4730 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4731 tp->write32_rx_mbox = tg3_write_flush_reg32;
4732 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4733 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4734 }
4735
Michael Chan944d9802005-05-29 14:57:48 -07004736 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004737 err = tg3_init_hw(tp, 1);
4738 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004739 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740
4741 tg3_netif_start(tp);
4742
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 if (restart_timer)
4744 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004745
Michael Chanb9ec6c12006-07-25 16:37:27 -07004746out:
Michael Chan7faa0062006-02-02 17:29:28 -08004747 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004748
4749 if (!err)
4750 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751}
4752
Michael Chanb0408752007-02-13 12:18:30 -08004753static void tg3_dump_short_state(struct tg3 *tp)
4754{
4755 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4756 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4757 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4758 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4759}
4760
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761static void tg3_tx_timeout(struct net_device *dev)
4762{
4763 struct tg3 *tp = netdev_priv(dev);
4764
Michael Chanb0408752007-02-13 12:18:30 -08004765 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004766 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4767 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004768 tg3_dump_short_state(tp);
4769 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770
4771 schedule_work(&tp->reset_task);
4772}
4773
Michael Chanc58ec932005-09-17 00:46:27 -07004774/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4775static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4776{
4777 u32 base = (u32) mapping & 0xffffffff;
4778
4779 return ((base > 0xffffdcc0) &&
4780 (base + len + 8 < base));
4781}
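/* Illustration only: with mapping = 0xffffe000 and len = 0x3000, the sum
 * base + len + 8 wraps to 0x00001008 in 32-bit arithmetic, which is less
 * than base, so the test above reports a 4GB-boundary crossing.  The
 * 0xffffdcc0 cutoff (4GB minus 0x2340) presumably serves as a cheap
 * pre-filter for buffers that start too low to reach the boundary.
 */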
4782
Michael Chan72f2afb2006-03-06 19:28:35 -08004783/* Test for DMA addresses > 40-bit */
4784static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4785 int len)
4786{
4787#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004788 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004789 return (((u64) mapping + len) > DMA_40BIT_MASK);
4790 return 0;
4791#else
4792 return 0;
4793#endif
4794}
4795
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4797
Michael Chan72f2afb2006-03-06 19:28:35 -08004798/* Workaround 4GB and 40-bit hardware DMA bugs. */
4799static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004800 u32 last_plus_one, u32 *start,
4801 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004802{
Matt Carlson41588ba2008-04-19 18:12:33 -07004803 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004804 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004806 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807
Matt Carlson41588ba2008-04-19 18:12:33 -07004808 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4809 new_skb = skb_copy(skb, GFP_ATOMIC);
4810 else {
4811 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4812
4813 new_skb = skb_copy_expand(skb,
4814 skb_headroom(skb) + more_headroom,
4815 skb_tailroom(skb), GFP_ATOMIC);
4816 }
4817
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004819 ret = -1;
4820 } else {
4821 /* New SKB is guaranteed to be linear. */
4822 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004823 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4824 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4825
Michael Chanc58ec932005-09-17 00:46:27 -07004826 /* Make sure new skb does not cross any 4G boundaries.
4827 * Drop the packet if it does.
4828 */
David S. Miller90079ce2008-09-11 04:52:51 -07004829 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004830 if (!ret)
4831 skb_dma_unmap(&tp->pdev->dev, new_skb,
4832 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004833 ret = -1;
4834 dev_kfree_skb(new_skb);
4835 new_skb = NULL;
4836 } else {
4837 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4838 base_flags, 1 | (mss << 1));
4839 *start = NEXT_TX(entry);
4840 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004841 }
4842
Linus Torvalds1da177e2005-04-16 15:20:36 -07004843 /* Now clean up the sw ring entries. */
4844 i = 0;
4845 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004846 if (i == 0) {
4847 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848 } else {
4849 tp->tx_buffers[entry].skb = NULL;
4850 }
4851 entry = NEXT_TX(entry);
4852 i++;
4853 }
4854
David S. Miller90079ce2008-09-11 04:52:51 -07004855 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856 dev_kfree_skb(skb);
4857
Michael Chanc58ec932005-09-17 00:46:27 -07004858 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859}
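/* Summary of the workaround above: the packet is copied into a freshly
 * allocated linear skb (on 5701 with extra headroom, apparently so the
 * data can be realigned), remapped with skb_dma_map(), and simply dropped
 * if the new mapping still fails tg3_4g_overflow_test().  The software
 * ring entries for the original skb are then cleaned up and the original
 * skb is unmapped and freed.
 */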
4860
4861static void tg3_set_txd(struct tg3 *tp, int entry,
4862 dma_addr_t mapping, int len, u32 flags,
4863 u32 mss_and_is_end)
4864{
4865 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4866 int is_end = (mss_and_is_end & 0x1);
4867 u32 mss = (mss_and_is_end >> 1);
4868 u32 vlan_tag = 0;
4869
4870 if (is_end)
4871 flags |= TXD_FLAG_END;
4872 if (flags & TXD_FLAG_VLAN) {
4873 vlan_tag = flags >> 16;
4874 flags &= 0xffff;
4875 }
4876 vlan_tag |= (mss << TXD_MSS_SHIFT);
4877
4878 txd->addr_hi = ((u64) mapping >> 32);
4879 txd->addr_lo = ((u64) mapping & 0xffffffff);
4880 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4881 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4882}
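/* Descriptor packing used by tg3_set_txd() above: the 64-bit DMA address
 * is split across addr_hi/addr_lo, len_flags carries the length above
 * TXD_LEN_SHIFT with the flag bits below it, and vlan_tag combines the
 * VLAN tag with the TSO MSS (via TXD_MSS_SHIFT).  The mss_and_is_end
 * argument packs the MSS in bits 31:1 and the end-of-packet marker in
 * bit 0.
 */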
4883
Michael Chan5a6f3072006-03-20 22:28:05 -08004884/* hard_start_xmit for devices that don't have any bugs and
4885 * support TG3_FLG2_HW_TSO_2 only.
4886 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4888{
4889 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004891 struct skb_shared_info *sp;
4892 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004893
4894 len = skb_headlen(skb);
4895
Michael Chan00b70502006-06-17 21:58:45 -07004896 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004897 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004898 * interrupt. Furthermore, IRQ processing runs lockless so we have
4899 * no IRQ context deadlocks to worry about either. Rejoice!
4900 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004901 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004902 if (!netif_queue_stopped(dev)) {
4903 netif_stop_queue(dev);
4904
4905 /* This is a hard error, log it. */
4906 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4907 "queue awake!\n", dev->name);
4908 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004909 return NETDEV_TX_BUSY;
4910 }
4911
4912 entry = tp->tx_prod;
4913 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004914 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004915 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004916 int tcp_opt_len, ip_tcp_len;
4917
4918 if (skb_header_cloned(skb) &&
4919 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4920 dev_kfree_skb(skb);
4921 goto out_unlock;
4922 }
4923
Michael Chanb0026622006-07-03 19:42:14 -07004924 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4925 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4926 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004927 struct iphdr *iph = ip_hdr(skb);
4928
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004929 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004930 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004931
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004932 iph->check = 0;
4933 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004934 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4935 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004936
4937 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4938 TXD_FLAG_CPU_POST_DMA);
4939
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004940 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004941
Michael Chan5a6f3072006-03-20 22:28:05 -08004942 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004943 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004944 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004945#if TG3_VLAN_TAG_USED
4946 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4947 base_flags |= (TXD_FLAG_VLAN |
4948 (vlan_tx_tag_get(skb) << 16));
4949#endif
4950
David S. Miller90079ce2008-09-11 04:52:51 -07004951 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4952 dev_kfree_skb(skb);
4953 goto out_unlock;
4954 }
4955
4956 sp = skb_shinfo(skb);
4957
4958 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004959
4960 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004961
4962 tg3_set_txd(tp, entry, mapping, len, base_flags,
4963 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4964
4965 entry = NEXT_TX(entry);
4966
4967 /* Now loop through additional data fragments, and queue them. */
4968 if (skb_shinfo(skb)->nr_frags > 0) {
4969 unsigned int i, last;
4970
4971 last = skb_shinfo(skb)->nr_frags - 1;
4972 for (i = 0; i <= last; i++) {
4973 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4974
4975 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004976 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004977 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004978
4979 tg3_set_txd(tp, entry, mapping, len,
4980 base_flags, (i == last) | (mss << 1));
4981
4982 entry = NEXT_TX(entry);
4983 }
4984 }
4985
4986 /* Packets are ready, update Tx producer idx local and on card. */
4987 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4988
4989 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004990 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004991 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004992 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004993 netif_wake_queue(tp->dev);
4994 }
4995
4996out_unlock:
4997 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004998
4999 dev->trans_start = jiffies;
5000
5001 return NETDEV_TX_OK;
5002}
5003
Michael Chan52c0fd82006-06-29 20:15:54 -07005004static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5005
5006/* Use GSO to workaround a rare TSO bug that may be triggered when the
5007 * TSO header is greater than 80 bytes.
5008 */
5009static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5010{
5011 struct sk_buff *segs, *nskb;
5012
5013 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07005014 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07005015 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08005016 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5017 return NETDEV_TX_BUSY;
5018
5019 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07005020 }
5021
5022 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07005023 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07005024 goto tg3_tso_bug_end;
5025
5026 do {
5027 nskb = segs;
5028 segs = segs->next;
5029 nskb->next = NULL;
5030 tg3_start_xmit_dma_bug(nskb, tp->dev);
5031 } while (segs);
5032
5033tg3_tso_bug_end:
5034 dev_kfree_skb(skb);
5035
5036 return NETDEV_TX_OK;
5037}
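/* In short: the fallback above leans on software GSO.  skb_gso_segment()
 * splits the oversized-header TSO packet into already-segmented frames,
 * and each one is pushed back through tg3_start_xmit_dma_bug() as an
 * ordinary transmit; the original skb is freed once all segments have
 * been queued.
 */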
Michael Chan52c0fd82006-06-29 20:15:54 -07005038
Michael Chan5a6f3072006-03-20 22:28:05 -08005039/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5040 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5041 */
5042static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
5043{
5044 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08005045 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07005046 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07005048 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049
5050 len = skb_headlen(skb);
5051
Michael Chan00b70502006-06-17 21:58:45 -07005052 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005053 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07005054 * interrupt. Furthermore, IRQ processing runs lockless so we have
5055 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056 */
Michael Chan1b2a7202006-08-07 21:46:02 -07005057 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08005058 if (!netif_queue_stopped(dev)) {
5059 netif_stop_queue(dev);
5060
5061 /* This is a hard error, log it. */
5062 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5063 "queue awake!\n", dev->name);
5064 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065 return NETDEV_TX_BUSY;
5066 }
5067
5068 entry = tp->tx_prod;
5069 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005070 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07005073 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005074 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07005075 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076
5077 if (skb_header_cloned(skb) &&
5078 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5079 dev_kfree_skb(skb);
5080 goto out_unlock;
5081 }
5082
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005083 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03005084 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085
Michael Chan52c0fd82006-06-29 20:15:54 -07005086 hdr_len = ip_tcp_len + tcp_opt_len;
5087 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08005088 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07005089 return (tg3_tso_bug(tp, skb));
5090
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5092 TXD_FLAG_CPU_POST_DMA);
5093
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005094 iph = ip_hdr(skb);
5095 iph->check = 0;
5096 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005098 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005100 } else
5101 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5102 iph->daddr, 0,
5103 IPPROTO_TCP,
5104 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105
5106 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5107 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005108 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109 int tsflags;
5110
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005111 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112 mss |= (tsflags << 11);
5113 }
5114 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005115 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116 int tsflags;
5117
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005118 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119 base_flags |= tsflags << 12;
5120 }
5121 }
5122 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123#if TG3_VLAN_TAG_USED
5124 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5125 base_flags |= (TXD_FLAG_VLAN |
5126 (vlan_tx_tag_get(skb) << 16));
5127#endif
5128
David S. Miller90079ce2008-09-11 04:52:51 -07005129 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5130 dev_kfree_skb(skb);
5131 goto out_unlock;
5132 }
5133
5134 sp = skb_shinfo(skb);
5135
5136 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137
5138 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139
5140 would_hit_hwbug = 0;
5141
Matt Carlson41588ba2008-04-19 18:12:33 -07005142 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5143 would_hit_hwbug = 1;
5144 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005145 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146
5147 tg3_set_txd(tp, entry, mapping, len, base_flags,
5148 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5149
5150 entry = NEXT_TX(entry);
5151
5152 /* Now loop through additional data fragments, and queue them. */
5153 if (skb_shinfo(skb)->nr_frags > 0) {
5154 unsigned int i, last;
5155
5156 last = skb_shinfo(skb)->nr_frags - 1;
5157 for (i = 0; i <= last; i++) {
5158 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5159
5160 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005161 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162
5163 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164
Michael Chanc58ec932005-09-17 00:46:27 -07005165 if (tg3_4g_overflow_test(mapping, len))
5166 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167
Michael Chan72f2afb2006-03-06 19:28:35 -08005168 if (tg3_40bit_overflow_test(tp, mapping, len))
5169 would_hit_hwbug = 1;
5170
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5172 tg3_set_txd(tp, entry, mapping, len,
5173 base_flags, (i == last)|(mss << 1));
5174 else
5175 tg3_set_txd(tp, entry, mapping, len,
5176 base_flags, (i == last));
5177
5178 entry = NEXT_TX(entry);
5179 }
5180 }
5181
5182 if (would_hit_hwbug) {
5183 u32 last_plus_one = entry;
5184 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185
Michael Chanc58ec932005-09-17 00:46:27 -07005186 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5187 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188
5189 /* If the workaround fails due to memory/mapping
5190 * failure, silently drop this packet.
5191 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005192 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005193 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194 goto out_unlock;
5195
5196 entry = start;
5197 }
5198
5199 /* Packets are ready, update Tx producer idx local and on card. */
5200 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5201
5202 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005203 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005204 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005205 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005206 netif_wake_queue(tp->dev);
5207 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208
5209out_unlock:
5210 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211
5212 dev->trans_start = jiffies;
5213
5214 return NETDEV_TX_OK;
5215}
5216
5217static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5218 int new_mtu)
5219{
5220 dev->mtu = new_mtu;
5221
Michael Chanef7f5ec2005-07-25 12:32:25 -07005222 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005223 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005224 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5225 ethtool_op_set_tso(dev, 0);
5226 }
5227 else
5228 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5229 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005230 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005231 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005232 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005233 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234}
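/* Note the trade-off made above for 5780-class chips: a jumbo MTU is
 * handled by turning TSO off (TG3_FLG2_TSO_CAPABLE cleared and
 * ethtool_op_set_tso(dev, 0)) rather than by enabling the jumbo ring,
 * while other chips simply set TG3_FLAG_JUMBO_RING_ENABLE.
 */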
5235
5236static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5237{
5238 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005239 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240
5241 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5242 return -EINVAL;
5243
5244 if (!netif_running(dev)) {
5245 /* We'll just catch it later when the
5246 * device is up'd.
5247 */
5248 tg3_set_mtu(dev, tp, new_mtu);
5249 return 0;
5250 }
5251
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005252 tg3_phy_stop(tp);
5253
Linus Torvalds1da177e2005-04-16 15:20:36 -07005254 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005255
5256 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257
Michael Chan944d9802005-05-29 14:57:48 -07005258 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259
5260 tg3_set_mtu(dev, tp, new_mtu);
5261
Michael Chanb9ec6c12006-07-25 16:37:27 -07005262 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263
Michael Chanb9ec6c12006-07-25 16:37:27 -07005264 if (!err)
5265 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266
David S. Millerf47c11e2005-06-24 20:18:35 -07005267 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005269 if (!err)
5270 tg3_phy_start(tp);
5271
Michael Chanb9ec6c12006-07-25 16:37:27 -07005272 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273}
5274
5275/* Free up pending packets in all rx/tx rings.
5276 *
5277 * The chip has been shut down and the driver detached from
5278	 * the network stack, so no interrupts or new tx packets will
5279 * end up in the driver. tp->{tx,}lock is not held and we are not
5280 * in an interrupt context and thus may sleep.
5281 */
5282static void tg3_free_rings(struct tg3 *tp)
5283{
5284 struct ring_info *rxp;
5285 int i;
5286
5287 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5288 rxp = &tp->rx_std_buffers[i];
5289
5290 if (rxp->skb == NULL)
5291 continue;
5292 pci_unmap_single(tp->pdev,
5293 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005294 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295 PCI_DMA_FROMDEVICE);
5296 dev_kfree_skb_any(rxp->skb);
5297 rxp->skb = NULL;
5298 }
5299
5300 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5301 rxp = &tp->rx_jumbo_buffers[i];
5302
5303 if (rxp->skb == NULL)
5304 continue;
5305 pci_unmap_single(tp->pdev,
5306 pci_unmap_addr(rxp, mapping),
5307 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5308 PCI_DMA_FROMDEVICE);
5309 dev_kfree_skb_any(rxp->skb);
5310 rxp->skb = NULL;
5311 }
5312
5313 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5314 struct tx_ring_info *txp;
5315 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316
5317 txp = &tp->tx_buffers[i];
5318 skb = txp->skb;
5319
5320 if (skb == NULL) {
5321 i++;
5322 continue;
5323 }
5324
David S. Miller90079ce2008-09-11 04:52:51 -07005325 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5326
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 txp->skb = NULL;
5328
David S. Miller90079ce2008-09-11 04:52:51 -07005329 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330
5331 dev_kfree_skb_any(skb);
5332 }
5333}
5334
5335/* Initialize tx/rx rings for packet processing.
5336 *
5337 * The chip has been shut down and the driver detached from
5338	 * the network stack, so no interrupts or new tx packets will
5339 * end up in the driver. tp->{tx,}lock are held and thus
5340 * we may not sleep.
5341 */
Michael Chan32d8c572006-07-25 16:38:29 -07005342static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343{
5344 u32 i;
5345
5346 /* Free up all the SKBs. */
5347 tg3_free_rings(tp);
5348
5349 /* Zero out all descriptors. */
5350 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5351 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5352 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5353 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5354
Michael Chan7e72aad2005-07-25 12:31:17 -07005355 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005356 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005357 (tp->dev->mtu > ETH_DATA_LEN))
5358 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5359
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360	 /* Initialize invariants of the rings; we only set this
5361 * stuff once. This works because the card does not
5362 * write into the rx buffer posting rings.
5363 */
5364 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5365 struct tg3_rx_buffer_desc *rxd;
5366
5367 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005368 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369 << RXD_LEN_SHIFT;
5370 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5371 rxd->opaque = (RXD_OPAQUE_RING_STD |
5372 (i << RXD_OPAQUE_INDEX_SHIFT));
5373 }
5374
Michael Chan0f893dc2005-07-25 12:30:38 -07005375 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5377 struct tg3_rx_buffer_desc *rxd;
5378
5379 rxd = &tp->rx_jumbo[i];
5380 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5381 << RXD_LEN_SHIFT;
5382 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5383 RXD_FLAG_JUMBO;
5384 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5385 (i << RXD_OPAQUE_INDEX_SHIFT));
5386 }
5387 }
5388
5389 /* Now allocate fresh SKBs for each rx ring. */
5390 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005391 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5392 printk(KERN_WARNING PFX
5393 "%s: Using a smaller RX standard ring, "
5394 "only %d out of %d buffers were allocated "
5395 "successfully.\n",
5396 tp->dev->name, i, tp->rx_pending);
5397 if (i == 0)
5398 return -ENOMEM;
5399 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005400 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005402 }
5403
Michael Chan0f893dc2005-07-25 12:30:38 -07005404 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005405 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5406 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005407 -1, i) < 0) {
5408 printk(KERN_WARNING PFX
5409 "%s: Using a smaller RX jumbo ring, "
5410 "only %d out of %d buffers were "
5411 "allocated successfully.\n",
5412 tp->dev->name, i, tp->rx_jumbo_pending);
5413 if (i == 0) {
5414 tg3_free_rings(tp);
5415 return -ENOMEM;
5416 }
5417 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005419 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005420 }
5421 }
Michael Chan32d8c572006-07-25 16:38:29 -07005422 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423}
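/* Allocation failures above are handled gracefully: if fewer standard or
 * jumbo rx buffers could be allocated than requested, the corresponding
 * *_pending count is shrunk to what actually succeeded and a warning is
 * printed; only a completely empty ring is treated as -ENOMEM.
 */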
5424
5425/*
5426 * Must not be invoked with interrupt sources disabled and
5427	 * the hardware shut down.
5428 */
5429static void tg3_free_consistent(struct tg3 *tp)
5430{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005431 kfree(tp->rx_std_buffers);
5432 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433 if (tp->rx_std) {
5434 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5435 tp->rx_std, tp->rx_std_mapping);
5436 tp->rx_std = NULL;
5437 }
5438 if (tp->rx_jumbo) {
5439 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5440 tp->rx_jumbo, tp->rx_jumbo_mapping);
5441 tp->rx_jumbo = NULL;
5442 }
5443 if (tp->rx_rcb) {
5444 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5445 tp->rx_rcb, tp->rx_rcb_mapping);
5446 tp->rx_rcb = NULL;
5447 }
5448 if (tp->tx_ring) {
5449 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5450 tp->tx_ring, tp->tx_desc_mapping);
5451 tp->tx_ring = NULL;
5452 }
5453 if (tp->hw_status) {
5454 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5455 tp->hw_status, tp->status_mapping);
5456 tp->hw_status = NULL;
5457 }
5458 if (tp->hw_stats) {
5459 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5460 tp->hw_stats, tp->stats_mapping);
5461 tp->hw_stats = NULL;
5462 }
5463}
5464
5465/*
5466 * Must not be invoked with interrupt sources disabled and
5467	 * the hardware shut down. Can sleep.
5468 */
5469static int tg3_alloc_consistent(struct tg3 *tp)
5470{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005471 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472 (TG3_RX_RING_SIZE +
5473 TG3_RX_JUMBO_RING_SIZE)) +
5474 (sizeof(struct tx_ring_info) *
5475 TG3_TX_RING_SIZE),
5476 GFP_KERNEL);
5477 if (!tp->rx_std_buffers)
5478 return -ENOMEM;
5479
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5481 tp->tx_buffers = (struct tx_ring_info *)
5482 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5483
5484 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5485 &tp->rx_std_mapping);
5486 if (!tp->rx_std)
5487 goto err_out;
5488
5489 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5490 &tp->rx_jumbo_mapping);
5491
5492 if (!tp->rx_jumbo)
5493 goto err_out;
5494
5495 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5496 &tp->rx_rcb_mapping);
5497 if (!tp->rx_rcb)
5498 goto err_out;
5499
5500 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5501 &tp->tx_desc_mapping);
5502 if (!tp->tx_ring)
5503 goto err_out;
5504
5505 tp->hw_status = pci_alloc_consistent(tp->pdev,
5506 TG3_HW_STATUS_SIZE,
5507 &tp->status_mapping);
5508 if (!tp->hw_status)
5509 goto err_out;
5510
5511 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5512 sizeof(struct tg3_hw_stats),
5513 &tp->stats_mapping);
5514 if (!tp->hw_stats)
5515 goto err_out;
5516
5517 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5518 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5519
5520 return 0;
5521
5522err_out:
5523 tg3_free_consistent(tp);
5524 return -ENOMEM;
5525}
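/* Allocation scheme used above: the three software ring-info arrays
 * (standard rx, jumbo rx, tx) come from a single kzalloc() and are carved
 * up by pointer arithmetic, while each hardware-visible ring plus the
 * status and statistics blocks get their own DMA-coherent
 * pci_alloc_consistent() buffer.
 */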
5526
5527#define MAX_WAIT_CNT 1000
5528
5529/* To stop a block, clear the enable bit and poll till it
5530 * clears. tp->lock is held.
5531 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005532static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533{
5534 unsigned int i;
5535 u32 val;
5536
5537 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5538 switch (ofs) {
5539 case RCVLSC_MODE:
5540 case DMAC_MODE:
5541 case MBFREE_MODE:
5542 case BUFMGR_MODE:
5543 case MEMARB_MODE:
5544 /* We can't enable/disable these bits of the
5545 * 5705/5750, just say success.
5546 */
5547 return 0;
5548
5549 default:
5550 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552 }
5553
5554 val = tr32(ofs);
5555 val &= ~enable_bit;
5556 tw32_f(ofs, val);
5557
5558 for (i = 0; i < MAX_WAIT_CNT; i++) {
5559 udelay(100);
5560 val = tr32(ofs);
5561 if ((val & enable_bit) == 0)
5562 break;
5563 }
5564
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005565 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5567 "ofs=%lx enable_bit=%x\n",
5568 ofs, enable_bit);
5569 return -ENODEV;
5570 }
5571
5572 return 0;
5573}
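/* Rough timing for tg3_stop_block() above: the poll loop runs at most
 * MAX_WAIT_CNT (1000) iterations of udelay(100), i.e. about 100 ms,
 * before the block is reported as stuck; with "silent" set, a timeout is
 * neither logged nor treated as an error.
 */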
5574
5575/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005576static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005577{
5578 int i, err;
5579
5580 tg3_disable_ints(tp);
5581
5582 tp->rx_mode &= ~RX_MODE_ENABLE;
5583 tw32_f(MAC_RX_MODE, tp->rx_mode);
5584 udelay(10);
5585
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005586 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5587 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5588 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5589 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5590 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5591 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005592
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005593 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5594 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5595 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5596 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5597 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5598 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5599 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005600
5601 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5602 tw32_f(MAC_MODE, tp->mac_mode);
5603 udelay(40);
5604
5605 tp->tx_mode &= ~TX_MODE_ENABLE;
5606 tw32_f(MAC_TX_MODE, tp->tx_mode);
5607
5608 for (i = 0; i < MAX_WAIT_CNT; i++) {
5609 udelay(100);
5610 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5611 break;
5612 }
5613 if (i >= MAX_WAIT_CNT) {
5614 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5615 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5616 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005617 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005618 }
5619
Michael Chane6de8ad2005-05-05 14:42:41 -07005620 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005621 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5622 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005623
5624 tw32(FTQ_RESET, 0xffffffff);
5625 tw32(FTQ_RESET, 0x00000000);
5626
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005627 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5628 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629
5630 if (tp->hw_status)
5631 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5632 if (tp->hw_stats)
5633 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5634
Linus Torvalds1da177e2005-04-16 15:20:36 -07005635 return err;
5636}
5637
5638/* tp->lock is held. */
5639static int tg3_nvram_lock(struct tg3 *tp)
5640{
5641 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5642 int i;
5643
Michael Chanec41c7d2006-01-17 02:40:55 -08005644 if (tp->nvram_lock_cnt == 0) {
5645 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5646 for (i = 0; i < 8000; i++) {
5647 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5648 break;
5649 udelay(20);
5650 }
5651 if (i == 8000) {
5652 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5653 return -ENODEV;
5654 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005656 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005657 }
5658 return 0;
5659}
5660
5661/* tp->lock is held. */
5662static void tg3_nvram_unlock(struct tg3 *tp)
5663{
Michael Chanec41c7d2006-01-17 02:40:55 -08005664 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5665 if (tp->nvram_lock_cnt > 0)
5666 tp->nvram_lock_cnt--;
5667 if (tp->nvram_lock_cnt == 0)
5668 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5669 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005670}
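/* nvram_lock_cnt above is a simple nesting count: only the outermost
 * tg3_nvram_lock() arbitrates for the hardware semaphore (NVRAM_SWARB,
 * polled for up to 8000 * 20us = 160 ms) and only the matching outermost
 * tg3_nvram_unlock() releases it; nested calls just adjust the counter.
 */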
5671
5672/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005673static void tg3_enable_nvram_access(struct tg3 *tp)
5674{
5675 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5676 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5677 u32 nvaccess = tr32(NVRAM_ACCESS);
5678
5679 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5680 }
5681}
5682
5683/* tp->lock is held. */
5684static void tg3_disable_nvram_access(struct tg3 *tp)
5685{
5686 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5687 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5688 u32 nvaccess = tr32(NVRAM_ACCESS);
5689
5690 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5691 }
5692}
5693
Matt Carlson0d3031d2007-10-10 18:02:43 -07005694static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5695{
5696 int i;
5697 u32 apedata;
5698
5699 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5700 if (apedata != APE_SEG_SIG_MAGIC)
5701 return;
5702
5703 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005704 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005705 return;
5706
5707 /* Wait for up to 1 millisecond for APE to service previous event. */
5708 for (i = 0; i < 10; i++) {
5709 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5710 return;
5711
5712 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5713
5714 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5715 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5716 event | APE_EVENT_STATUS_EVENT_PENDING);
5717
5718 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5719
5720 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5721 break;
5722
5723 udelay(100);
5724 }
5725
5726 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5727 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5728}
5729
5730static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5731{
5732 u32 event;
5733 u32 apedata;
5734
5735 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5736 return;
5737
5738 switch (kind) {
5739 case RESET_KIND_INIT:
5740 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5741 APE_HOST_SEG_SIG_MAGIC);
5742 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5743 APE_HOST_SEG_LEN_MAGIC);
5744 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5745 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5746 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5747 APE_HOST_DRIVER_ID_MAGIC);
5748 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5749 APE_HOST_BEHAV_NO_PHYLOCK);
5750
5751 event = APE_EVENT_STATUS_STATE_START;
5752 break;
5753 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005754 /* With the interface we are currently using,
5755 * APE does not track driver state. Wiping
5756 * out the HOST SEGMENT SIGNATURE forces
5757 * the APE to assume OS absent status.
5758 */
5759 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5760
Matt Carlson0d3031d2007-10-10 18:02:43 -07005761 event = APE_EVENT_STATUS_STATE_UNLOAD;
5762 break;
5763 case RESET_KIND_SUSPEND:
5764 event = APE_EVENT_STATUS_STATE_SUSPEND;
5765 break;
5766 default:
5767 return;
5768 }
5769
5770 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5771
5772 tg3_ape_send_event(tp, event);
5773}
5774
Michael Chane6af3012005-04-21 17:12:05 -07005775/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005776static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5777{
David S. Millerf49639e2006-06-09 11:58:36 -07005778 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5779 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780
5781 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5782 switch (kind) {
5783 case RESET_KIND_INIT:
5784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5785 DRV_STATE_START);
5786 break;
5787
5788 case RESET_KIND_SHUTDOWN:
5789 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5790 DRV_STATE_UNLOAD);
5791 break;
5792
5793 case RESET_KIND_SUSPEND:
5794 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5795 DRV_STATE_SUSPEND);
5796 break;
5797
5798 default:
5799 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005801 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005802
5803 if (kind == RESET_KIND_INIT ||
5804 kind == RESET_KIND_SUSPEND)
5805 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005806}
5807
5808/* tp->lock is held. */
5809static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5810{
5811 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5812 switch (kind) {
5813 case RESET_KIND_INIT:
5814 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5815 DRV_STATE_START_DONE);
5816 break;
5817
5818 case RESET_KIND_SHUTDOWN:
5819 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5820 DRV_STATE_UNLOAD_DONE);
5821 break;
5822
5823 default:
5824 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005825 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005827
5828 if (kind == RESET_KIND_SHUTDOWN)
5829 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830}
5831
5832/* tp->lock is held. */
5833static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5834{
5835 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5836 switch (kind) {
5837 case RESET_KIND_INIT:
5838 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5839 DRV_STATE_START);
5840 break;
5841
5842 case RESET_KIND_SHUTDOWN:
5843 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5844 DRV_STATE_UNLOAD);
5845 break;
5846
5847 case RESET_KIND_SUSPEND:
5848 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5849 DRV_STATE_SUSPEND);
5850 break;
5851
5852 default:
5853 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005854 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 }
5856}
5857
Michael Chan7a6f4362006-09-27 16:03:31 -07005858static int tg3_poll_fw(struct tg3 *tp)
5859{
5860 int i;
5861 u32 val;
5862
Michael Chanb5d37722006-09-27 16:06:21 -07005863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005864 /* Wait up to 20ms for init done. */
5865 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005866 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5867 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005868 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005869 }
5870 return -ENODEV;
5871 }
5872
Michael Chan7a6f4362006-09-27 16:03:31 -07005873 /* Wait for firmware initialization to complete. */
5874 for (i = 0; i < 100000; i++) {
5875 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5876 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5877 break;
5878 udelay(10);
5879 }
5880
5881 /* Chip might not be fitted with firmware. Some Sun onboard
5882 * parts are configured like that. So don't signal the timeout
5883 * of the above loop as an error, but do report the lack of
5884 * running firmware once.
5885 */
5886 if (i >= 100000 &&
5887 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5888 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5889
5890 printk(KERN_INFO PFX "%s: No firmware running.\n",
5891 tp->dev->name);
5892 }
5893
5894 return 0;
5895}
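/* Timing of tg3_poll_fw() above: the 5906 (VCPU) path polls for up to
 * 200 * 100us = 20 ms, the generic path for up to 100000 * 10us = 1 s.
 * A missing bootcode signature is deliberately not an error here; it is
 * reported once, since some Sun onboard parts ship without firmware.
 */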
5896
Michael Chanee6a99b2007-07-18 21:49:10 -07005897/* Save PCI command register before chip reset */
5898static void tg3_save_pci_state(struct tg3 *tp)
5899{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005900 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005901}
5902
5903/* Restore PCI state after chip reset */
5904static void tg3_restore_pci_state(struct tg3 *tp)
5905{
5906 u32 val;
5907
5908 /* Re-enable indirect register accesses. */
5909 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5910 tp->misc_host_ctrl);
5911
5912 /* Set MAX PCI retry to zero. */
5913 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5914 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5915 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5916 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005917 /* Allow reads and writes to the APE register and memory space. */
5918 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5919 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5920 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005921 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5922
Matt Carlson8a6eac92007-10-21 16:17:55 -07005923 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005924
Matt Carlsonfcb389d2008-11-03 16:55:44 -08005925 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5926 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5927 pcie_set_readrq(tp->pdev, 4096);
5928 else {
5929 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5930 tp->pci_cacheline_sz);
5931 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5932 tp->pci_lat_timer);
5933 }
Michael Chan114342f2007-10-15 02:12:26 -07005934 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005935
Michael Chanee6a99b2007-07-18 21:49:10 -07005936 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson52f44902008-11-21 17:17:04 -08005937 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
Matt Carlson9974a352007-10-07 23:27:28 -07005938 u16 pcix_cmd;
5939
5940 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5941 &pcix_cmd);
5942 pcix_cmd &= ~PCI_X_CMD_ERO;
5943 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5944 pcix_cmd);
5945 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005946
5947 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005948
5949 /* Chip reset on 5780 will reset MSI enable bit,
5950 * so need to restore it.
5951 */
5952 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5953 u16 ctrl;
5954
5955 pci_read_config_word(tp->pdev,
5956 tp->msi_cap + PCI_MSI_FLAGS,
5957 &ctrl);
5958 pci_write_config_word(tp->pdev,
5959 tp->msi_cap + PCI_MSI_FLAGS,
5960 ctrl | PCI_MSI_FLAGS_ENABLE);
5961 val = tr32(MSGINT_MODE);
5962 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5963 }
5964 }
5965}
5966
Linus Torvalds1da177e2005-04-16 15:20:36 -07005967static void tg3_stop_fw(struct tg3 *);
5968
5969/* tp->lock is held. */
5970static int tg3_chip_reset(struct tg3 *tp)
5971{
5972 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005973 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005974 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005975
David S. Millerf49639e2006-06-09 11:58:36 -07005976 tg3_nvram_lock(tp);
5977
Matt Carlson158d7ab2008-05-29 01:37:54 -07005978 tg3_mdio_stop(tp);
5979
Matt Carlson77b483f2008-08-15 14:07:24 -07005980 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5981
David S. Millerf49639e2006-06-09 11:58:36 -07005982 /* No matching tg3_nvram_unlock() after this because
5983 * chip reset below will undo the nvram lock.
5984 */
5985 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005986
Michael Chanee6a99b2007-07-18 21:49:10 -07005987 /* GRC_MISC_CFG core clock reset will clear the memory
5988 * enable bit in PCI register 4 and the MSI enable bit
5989 * on some chips, so we save relevant registers here.
5990 */
5991 tg3_save_pci_state(tp);
5992
Michael Chand9ab5ad2006-03-20 22:27:35 -08005993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005996 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005997 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005999 tw32(GRC_FASTBOOT_PC, 0);
6000
Linus Torvalds1da177e2005-04-16 15:20:36 -07006001 /*
6002 * We must avoid the readl() that normally takes place.
6003 * It locks machines, causes machine checks, and other
6004 * fun things. So, temporarily disable the 5701
6005 * hardware workaround, while we do the reset.
6006 */
Michael Chan1ee582d2005-08-09 20:16:46 -07006007 write_op = tp->write32;
6008 if (write_op == tg3_write_flush_reg32)
6009 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006010
Michael Chand18edcb2007-03-24 20:57:11 -07006011 /* Prevent the irq handler from reading or writing PCI registers
6012 * during chip reset when the memory enable bit in the PCI command
6013 * register may be cleared. The chip does not generate interrupt
6014 * at this time, but the irq handler may still be called due to irq
6015 * sharing or irqpoll.
6016 */
6017 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07006018 if (tp->hw_status) {
6019 tp->hw_status->status = 0;
6020 tp->hw_status->status_tag = 0;
6021 }
Michael Chand18edcb2007-03-24 20:57:11 -07006022 tp->last_tag = 0;
6023 smp_mb();
6024 synchronize_irq(tp->pdev->irq);
6025
Linus Torvalds1da177e2005-04-16 15:20:36 -07006026 /* do the reset */
6027 val = GRC_MISC_CFG_CORECLK_RESET;
6028
6029 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6030 if (tr32(0x7e2c) == 0x60) {
6031 tw32(0x7e2c, 0x20);
6032 }
6033 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6034 tw32(GRC_MISC_CFG, (1 << 29));
6035 val |= (1 << 29);
6036 }
6037 }
6038
Michael Chanb5d37722006-09-27 16:06:21 -07006039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6040 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6041 tw32(GRC_VCPU_EXT_CTRL,
6042 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6043 }
6044
Linus Torvalds1da177e2005-04-16 15:20:36 -07006045 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6046 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6047 tw32(GRC_MISC_CFG, val);
6048
Michael Chan1ee582d2005-08-09 20:16:46 -07006049 /* restore 5701 hardware bug workaround write method */
6050 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006051
6052 /* Unfortunately, we have to delay before the PCI read back.
6053	 * Some 575X chips will not even respond to a PCI cfg access
6054 * when the reset command is given to the chip.
6055 *
6056 * How do these hardware designers expect things to work
6057 * properly if the PCI write is posted for a long period
6058 * of time? It is always necessary to have some method by
6059 * which a register read back can occur to push the write
6060 * out which does the reset.
6061 *
6062 * For most tg3 variants the trick below was working.
6063 * Ho hum...
6064 */
6065 udelay(120);
6066
6067 /* Flush PCI posted writes. The normal MMIO registers
6068 * are inaccessible at this time so this is the only
6069	 * way to do this reliably (actually, this is no longer
6070 * the case, see above). I tried to use indirect
6071 * register read/write but this upset some 5701 variants.
6072 */
6073 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6074
6075 udelay(120);
6076
Matt Carlson5e7dfd02008-11-21 17:18:16 -08006077 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006078 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6079 int i;
6080 u32 cfg_val;
6081
6082 /* Wait for link training to complete. */
6083 for (i = 0; i < 5000; i++)
6084 udelay(100);
6085
6086 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6087 pci_write_config_dword(tp->pdev, 0xc4,
6088 cfg_val | (1 << 15));
6089 }
Matt Carlson5e7dfd02008-11-21 17:18:16 -08006090
6091 /* Set PCIE max payload size to 128 bytes and
6092 * clear the "no snoop" and "relaxed ordering" bits.
6093 */
6094 pci_write_config_word(tp->pdev,
6095 tp->pcie_cap + PCI_EXP_DEVCTL,
6096 0);
6097
6098 pcie_set_readrq(tp->pdev, 4096);
6099
6100 /* Clear error status */
6101 pci_write_config_word(tp->pdev,
6102 tp->pcie_cap + PCI_EXP_DEVSTA,
6103 PCI_EXP_DEVSTA_CED |
6104 PCI_EXP_DEVSTA_NFED |
6105 PCI_EXP_DEVSTA_FED |
6106 PCI_EXP_DEVSTA_URD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006107 }
6108
Michael Chanee6a99b2007-07-18 21:49:10 -07006109 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006110
Michael Chand18edcb2007-03-24 20:57:11 -07006111 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6112
Michael Chanee6a99b2007-07-18 21:49:10 -07006113 val = 0;
6114 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07006115 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07006116 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006117
6118 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6119 tg3_stop_fw(tp);
6120 tw32(0x5000, 0x400);
6121 }
6122
6123 tw32(GRC_MODE, tp->grc_mode);
6124
6125 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006126 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006127
6128 tw32(0xc4, val | (1 << 15));
6129 }
6130
6131 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6133 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6134 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6135 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6136 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6137 }
6138
6139 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6140 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6141 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07006142 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6143 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6144 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07006145 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6146 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6147 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6148 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6149 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006150 } else
6151 tw32_f(MAC_MODE, 0);
6152 udelay(40);
6153
Matt Carlson158d7ab2008-05-29 01:37:54 -07006154 tg3_mdio_start(tp);
6155
Matt Carlson77b483f2008-08-15 14:07:24 -07006156 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6157
Michael Chan7a6f4362006-09-27 16:03:31 -07006158 err = tg3_poll_fw(tp);
6159 if (err)
6160 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006161
6162 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6163 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006164 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165
6166 tw32(0x7c00, val | (1 << 25));
6167 }
6168
6169 /* Reprobe ASF enable state. */
6170 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6171 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6172 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6173 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6174 u32 nic_cfg;
6175
6176 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6177 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6178 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006179 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006180 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006181 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6182 }
6183 }
6184
6185 return 0;
6186}
6187
6188/* tp->lock is held. */
6189static void tg3_stop_fw(struct tg3 *tp)
6190{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006191 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6192 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006193 /* Wait for RX cpu to ACK the previous event. */
6194 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006195
6196 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006197
6198 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006199
Matt Carlson7c5026a2008-05-02 16:49:29 -07006200 /* Wait for RX cpu to ACK this event. */
6201 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006202 }
6203}
6204
6205/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006206static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006207{
6208 int err;
6209
6210 tg3_stop_fw(tp);
6211
Michael Chan944d9802005-05-29 14:57:48 -07006212 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006213
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006214 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006215 err = tg3_chip_reset(tp);
6216
Michael Chan944d9802005-05-29 14:57:48 -07006217 tg3_write_sig_legacy(tp, kind);
6218 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219
6220 if (err)
6221 return err;
6222
6223 return 0;
6224}
6225
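/* Layout of the 5701 A0 workaround firmware image.  tg3_load_5701_a0_firmware_fix()
 * below copies it into the RX and TX CPU scratch areas and then starts only the
 * RX CPU.
 */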
6226#define TG3_FW_RELEASE_MAJOR 0x0
6227#define TG3_FW_RELEASE_MINOR	0x0
6228#define TG3_FW_RELEASE_FIX 0x0
6229#define TG3_FW_START_ADDR 0x08000000
6230#define TG3_FW_TEXT_ADDR 0x08000000
6231#define TG3_FW_TEXT_LEN 0x9c0
6232#define TG3_FW_RODATA_ADDR 0x080009c0
6233#define TG3_FW_RODATA_LEN 0x60
6234#define TG3_FW_DATA_ADDR 0x08000a40
6235#define TG3_FW_DATA_LEN 0x20
6236#define TG3_FW_SBSS_ADDR 0x08000a60
6237#define TG3_FW_SBSS_LEN 0xc
6238#define TG3_FW_BSS_ADDR 0x08000a70
6239#define TG3_FW_BSS_LEN 0x10
6240
Andreas Mohr50da8592006-08-14 23:54:30 -07006241static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006242 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6243 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6244 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6245 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6246 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6247 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6248 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6249 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6250 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6251 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6252 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6253 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6254 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6255 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6256 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6257 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6258 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6259 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6260 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6261 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6262 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6263 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6264 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6265 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6266 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6267 0, 0, 0, 0, 0, 0,
6268 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6269 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6270 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6271 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6272 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6273 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6274 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6275 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6276 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6277 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6278 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6280 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6281 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6282 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6283 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6284 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6285 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6286 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6287 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6288 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6289 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6290 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6291 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6292 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6293 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6294 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6295 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6296 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6297 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6298 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6299 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6300 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6301 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6302 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6303 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6304 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6305 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6306 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6307 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6308 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6309 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6310 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6311 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6312 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6313 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6314 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6315 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6316 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6317 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6318 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6319 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6320 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6321 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6322 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6323 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6324 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6325 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6326 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6327 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6328 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6329 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6330 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6331 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6332 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6333};
6334
Andreas Mohr50da8592006-08-14 23:54:30 -07006335static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006336 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6337 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6338 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6339 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6340 0x00000000
6341};
6342
6343#if 0 /* All zeros, don't eat up space with it. */
6344u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6345 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6346 0x00000000, 0x00000000, 0x00000000, 0x00000000
6347};
6348#endif
6349
6350#define RX_CPU_SCRATCH_BASE 0x30000
6351#define RX_CPU_SCRATCH_SIZE 0x04000
6352#define TX_CPU_SCRATCH_BASE 0x34000
6353#define TX_CPU_SCRATCH_SIZE 0x04000
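/* Scratch memory regions for the on-chip RX and TX CPUs; firmware images
 * are copied into these areas by tg3_load_firmware_cpu().
 */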
6354
6355/* tp->lock is held. */
6356static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6357{
6358 int i;
6359
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006360 BUG_ON(offset == TX_CPU_BASE &&
6361 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006362
Michael Chanb5d37722006-09-27 16:06:21 -07006363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6364 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6365
6366 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6367 return 0;
6368 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006369 if (offset == RX_CPU_BASE) {
6370 for (i = 0; i < 10000; i++) {
6371 tw32(offset + CPU_STATE, 0xffffffff);
6372 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6373 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6374 break;
6375 }
6376
6377 tw32(offset + CPU_STATE, 0xffffffff);
6378 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6379 udelay(10);
6380 } else {
6381 for (i = 0; i < 10000; i++) {
6382 tw32(offset + CPU_STATE, 0xffffffff);
6383 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6384 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6385 break;
6386 }
6387 }
6388
6389 if (i >= 10000) {
6390 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6391 "and %s CPU\n",
6392 tp->dev->name,
6393 (offset == RX_CPU_BASE ? "RX" : "TX"));
6394 return -ENODEV;
6395 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006396
6397 /* Clear firmware's nvram arbitration. */
6398 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6399 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006400 return 0;
6401}
6402
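/* Describes one firmware image: each segment carries the address it was
 * linked at, its length, and a pointer to the image data (NULL means the
 * segment is all zeros).  Only the low 16 bits of each base address are
 * used as the offset into the CPU scratch area when the image is loaded.
 */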
6403struct fw_info {
6404 unsigned int text_base;
6405 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006406 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006407 unsigned int rodata_base;
6408 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006409 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006410 unsigned int data_base;
6411 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006412 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006413};
6414
6415/* tp->lock is held. */
6416static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6417 int cpu_scratch_size, struct fw_info *info)
6418{
Michael Chanec41c7d2006-01-17 02:40:55 -08006419 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006420 void (*write_op)(struct tg3 *, u32, u32);
6421
6422 if (cpu_base == TX_CPU_BASE &&
6423 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6424 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6425 "TX cpu firmware on %s which is 5705.\n",
6426 tp->dev->name);
6427 return -EINVAL;
6428 }
6429
6430 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6431 write_op = tg3_write_mem;
6432 else
6433 write_op = tg3_write_indirect_reg32;
6434
Michael Chan1b628152005-05-29 14:59:49 -07006435 /* It is possible that bootcode is still loading at this point.
6436 * Get the nvram lock first before halting the cpu.
6437 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006438 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006439 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006440 if (!lock_err)
6441 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006442 if (err)
6443 goto out;
6444
6445 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6446 write_op(tp, cpu_scratch_base + i, 0);
6447 tw32(cpu_base + CPU_STATE, 0xffffffff);
6448 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6449 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6450 write_op(tp, (cpu_scratch_base +
6451 (info->text_base & 0xffff) +
6452 (i * sizeof(u32))),
6453 (info->text_data ?
6454 info->text_data[i] : 0));
6455 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6456 write_op(tp, (cpu_scratch_base +
6457 (info->rodata_base & 0xffff) +
6458 (i * sizeof(u32))),
6459 (info->rodata_data ?
6460 info->rodata_data[i] : 0));
6461 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6462 write_op(tp, (cpu_scratch_base +
6463 (info->data_base & 0xffff) +
6464 (i * sizeof(u32))),
6465 (info->data_data ?
6466 info->data_data[i] : 0));
6467
6468 err = 0;
6469
6470out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006471 return err;
6472}
6473
6474/* tp->lock is held. */
6475static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6476{
6477 struct fw_info info;
6478 int err, i;
6479
6480 info.text_base = TG3_FW_TEXT_ADDR;
6481 info.text_len = TG3_FW_TEXT_LEN;
6482 info.text_data = &tg3FwText[0];
6483 info.rodata_base = TG3_FW_RODATA_ADDR;
6484 info.rodata_len = TG3_FW_RODATA_LEN;
6485 info.rodata_data = &tg3FwRodata[0];
6486 info.data_base = TG3_FW_DATA_ADDR;
6487 info.data_len = TG3_FW_DATA_LEN;
6488 info.data_data = NULL;
6489
6490 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6491 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6492 &info);
6493 if (err)
6494 return err;
6495
6496 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6497 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6498 &info);
6499 if (err)
6500 return err;
6501
6502	/* Now start up only the RX cpu. */
6503 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6504 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6505
6506 for (i = 0; i < 5; i++) {
6507 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6508 break;
6509 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6510 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6511 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6512 udelay(1000);
6513 }
6514 if (i >= 5) {
6515 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6516 "to set RX CPU PC, is %08x should be %08x\n",
6517 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6518 TG3_FW_TEXT_ADDR);
6519 return -ENODEV;
6520 }
6521 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6522 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6523
6524 return 0;
6525}
6526
Linus Torvalds1da177e2005-04-16 15:20:36 -07006527
6528#define TG3_TSO_FW_RELEASE_MAJOR 0x1
6529#define TG3_TSO_FW_RELEASE_MINOR	0x6
6530#define TG3_TSO_FW_RELEASE_FIX 0x0
6531#define TG3_TSO_FW_START_ADDR 0x08000000
6532#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6533#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6534#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6535#define TG3_TSO_FW_RODATA_LEN 0x60
6536#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6537#define TG3_TSO_FW_DATA_LEN 0x30
6538#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6539#define TG3_TSO_FW_SBSS_LEN 0x2c
6540#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6541#define TG3_TSO_FW_BSS_LEN 0x894
6542
Andreas Mohr50da8592006-08-14 23:54:30 -07006543static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006544 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6545 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6546 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6547 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6548 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6549 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6550 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6551 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6552 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6553 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6554 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6555 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6556 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6557 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6558 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6559 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6560 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6561 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6562 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6563 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6564 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6565 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6566 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6567 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6568 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6569 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6570 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6571 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6572 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6573 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6574 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6575 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6576 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6577 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6578 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6579 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6580 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6581 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6582 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6583 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6584 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6585 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6586 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6587 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6588 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6589 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6590 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6591 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6592 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6593 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6594 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6595 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6596 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6597 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6598 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6599 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6600 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6601 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6602 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6603 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6604 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6605 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6606 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6607 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6608 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6609 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6610 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6611 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6612 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6613 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6614 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6615 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6616 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6617 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6618 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6619 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6620 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6621 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6622 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6623 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6624 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6625 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6626 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6627 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6628 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6629 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6630 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6631 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6632 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6633 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6634 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6635 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6636 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6637 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6638 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6639 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6640 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6641 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6642 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6643 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6644 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6645 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6646 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6647 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6648 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6649 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6650 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6651 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6652 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6653 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6654 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6655 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6656 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6657 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6658 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6659 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6660 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6661 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6662 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6663 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6664 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6665 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6666 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6667 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6668 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6669 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6670 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6671 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6672 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6673 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6674 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6675 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6676 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6677 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6678 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6679 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6680 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6681 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6682 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6683 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6684 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6685 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6686 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6687 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6688 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6689 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6690 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6691 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6692 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6693 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6694 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6695 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6696 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6697 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6698 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6699 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6700 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6701 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6702 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6703 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6704 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6705 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6706 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6707 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6708 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6709 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6710 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6711 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6712 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6713 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6714 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6715 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6716 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6717 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6718 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6719 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6720 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6721 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6722 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6723 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6724 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6725 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6726 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6727 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6728 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6729 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6730 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6731 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6732 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6733 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6734 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6735 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6736 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6737 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6738 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6739 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6740 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6741 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6742 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6743 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6744 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6745 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6746 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6747 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6748 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6749 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6750 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6751 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6752 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6753 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6754 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6755 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6756 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6757 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6758 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6759 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6760 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6761 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6762 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6763 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6764 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6765 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6766 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6767 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6768 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6769 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6770 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6771 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6772 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6773 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6774 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6775 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6776 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6777 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6778 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6779 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6780 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6781 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6782 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6783 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6784 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6785 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6786 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6787 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6788 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6789 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6790 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6791 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6792 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6793 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6794 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6795 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6796 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6797 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6798 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6799 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6800 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6801 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6802 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6803 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6804 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6805 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6806 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6807 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6808 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6809 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6810 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6811 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6812 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6813 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6814 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6815 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6816 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6817 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6818 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6819 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6820 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6821 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6822 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6823 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6824 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6825 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6826 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6827 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6828};
6829
Andreas Mohr50da8592006-08-14 23:54:30 -07006830static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006831 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6832 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6833 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6834 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6835 0x00000000,
6836};
6837
Andreas Mohr50da8592006-08-14 23:54:30 -07006838static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006839 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6840 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6841 0x00000000,
6842};
6843
6844/* 5705 needs a special version of the TSO firmware. */
6845#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6846#define TG3_TSO5_FW_RELEASE_MINOR	0x2
6847#define TG3_TSO5_FW_RELEASE_FIX 0x0
6848#define TG3_TSO5_FW_START_ADDR 0x00010000
6849#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6850#define TG3_TSO5_FW_TEXT_LEN 0xe90
6851#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6852#define TG3_TSO5_FW_RODATA_LEN 0x50
6853#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6854#define TG3_TSO5_FW_DATA_LEN 0x20
6855#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6856#define TG3_TSO5_FW_SBSS_LEN 0x28
6857#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6858#define TG3_TSO5_FW_BSS_LEN 0x88
6859
Andreas Mohr50da8592006-08-14 23:54:30 -07006860static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006861 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6862 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6863 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6864 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6865 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6866 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6867 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6868 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6869 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6870 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6871 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6872 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6873 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6874 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6875 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6876 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6877 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6878 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6879 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6880 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6881 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6882 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6883 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6884 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6885 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6886 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6887 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6888 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6889 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6890 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6891 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6892 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6893 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6894 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6895 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6896 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6897 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6898 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6899 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6900 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6901 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6902 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6903 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6904 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6905 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6906 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6907 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6908 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6909 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6910 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6911 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6912 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6913 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6914 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6915 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6916 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6917 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6918 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6919 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6920 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6921 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6922 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6923 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6924 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6925 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6926 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6927 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6928 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6929 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6930 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6931 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6932 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6933 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6934 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6935 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6936 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6937 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6938 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6939 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6940 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6941 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6942 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6943 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6944 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6945 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6946 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6947 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6948 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6949 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6950 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6951 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6952 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6953 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6954 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6955 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6956 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6957 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6958 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6959 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6960 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6961 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6962 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6963 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6964 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6965 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6966 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6967 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6968 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6969 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6970 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6971 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6972 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6973 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6974 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6975 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6976 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6977 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6978 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6979 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6980 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6981 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6982 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6983 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6984 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6985 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6986 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6987 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6988 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6989 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6990 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6991 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6992 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6993 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6994 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6995 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6996 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6997 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6998 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6999 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
7000 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
7001 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
7002 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
7003 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
7004 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
7005 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
7006 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
7007 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
7008 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
7009 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
7010 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
7011 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
7012 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
7013 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
7014 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
7015 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
7016 0x00000000, 0x00000000, 0x00000000,
7017};
7018
Andreas Mohr50da8592006-08-14 23:54:30 -07007019static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
7021 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
7022 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
7023 0x00000000, 0x00000000, 0x00000000,
7024};
7025
Andreas Mohr50da8592006-08-14 23:54:30 -07007026static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007027 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
7028 0x00000000, 0x00000000, 0x00000000,
7029};
7030
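/* Load the TSO offload firmware into the appropriate on-chip CPU. On
 * 5705-class parts the image runs on the RX CPU and borrows the start
 * of the mbuf pool in NIC SRAM as scratch space; other TSO-capable
 * chips load it into the TX CPU's dedicated scratch area. Chips with
 * hardware TSO need no firmware download at all.
 */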
7031/* tp->lock is held. */
7032static int tg3_load_tso_firmware(struct tg3 *tp)
7033{
7034 struct fw_info info;
7035 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7036 int err, i;
7037
7038 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7039 return 0;
7040
7041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7042 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
7043 info.text_len = TG3_TSO5_FW_TEXT_LEN;
7044 info.text_data = &tg3Tso5FwText[0];
7045 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
7046 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
7047 info.rodata_data = &tg3Tso5FwRodata[0];
7048 info.data_base = TG3_TSO5_FW_DATA_ADDR;
7049 info.data_len = TG3_TSO5_FW_DATA_LEN;
7050 info.data_data = &tg3Tso5FwData[0];
7051 cpu_base = RX_CPU_BASE;
7052 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7053 cpu_scratch_size = (info.text_len +
7054 info.rodata_len +
7055 info.data_len +
7056 TG3_TSO5_FW_SBSS_LEN +
7057 TG3_TSO5_FW_BSS_LEN);
7058 } else {
7059 info.text_base = TG3_TSO_FW_TEXT_ADDR;
7060 info.text_len = TG3_TSO_FW_TEXT_LEN;
7061 info.text_data = &tg3TsoFwText[0];
7062 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
7063 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
7064 info.rodata_data = &tg3TsoFwRodata[0];
7065 info.data_base = TG3_TSO_FW_DATA_ADDR;
7066 info.data_len = TG3_TSO_FW_DATA_LEN;
7067 info.data_data = &tg3TsoFwData[0];
7068 cpu_base = TX_CPU_BASE;
7069 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7070 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7071 }
7072
7073 err = tg3_load_firmware_cpu(tp, cpu_base,
7074 cpu_scratch_base, cpu_scratch_size,
7075 &info);
7076 if (err)
7077 return err;
7078
7079	/* Now start up the CPU. */
7080 tw32(cpu_base + CPU_STATE, 0xffffffff);
7081 tw32_f(cpu_base + CPU_PC, info.text_base);
7082
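	/* Verify that the CPU picked up the firmware entry point. Retry
	 * the halt/PC-write sequence up to five times, one millisecond
	 * apart, before giving up.
	 */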
7083 for (i = 0; i < 5; i++) {
7084 if (tr32(cpu_base + CPU_PC) == info.text_base)
7085 break;
7086 tw32(cpu_base + CPU_STATE, 0xffffffff);
7087 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7088 tw32_f(cpu_base + CPU_PC, info.text_base);
7089 udelay(1000);
7090 }
7091 if (i >= 5) {
7092 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7093 "to set CPU PC, is %08x should be %08x\n",
7094 tp->dev->name, tr32(cpu_base + CPU_PC),
7095 info.text_base);
7096 return -ENODEV;
7097 }
7098 tw32(cpu_base + CPU_STATE, 0xffffffff);
7099 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7100 return 0;
7101}
7102
Linus Torvalds1da177e2005-04-16 15:20:36 -07007103
Linus Torvalds1da177e2005-04-16 15:20:36 -07007104static int tg3_set_mac_addr(struct net_device *dev, void *p)
7105{
7106 struct tg3 *tp = netdev_priv(dev);
7107 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07007108 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007109
Michael Chanf9804dd2005-09-27 12:13:10 -07007110 if (!is_valid_ether_addr(addr->sa_data))
7111 return -EINVAL;
7112
Linus Torvalds1da177e2005-04-16 15:20:36 -07007113 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7114
Michael Chane75f7c92006-03-20 21:33:26 -08007115 if (!netif_running(dev))
7116 return 0;
7117
Michael Chan58712ef2006-04-29 18:58:01 -07007118 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07007119 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07007120
Michael Chan986e0ae2007-05-05 12:10:20 -07007121 addr0_high = tr32(MAC_ADDR_0_HIGH);
7122 addr0_low = tr32(MAC_ADDR_0_LOW);
7123 addr1_high = tr32(MAC_ADDR_1_HIGH);
7124 addr1_low = tr32(MAC_ADDR_1_LOW);
7125
7126 /* Skip MAC addr 1 if ASF is using it. */
7127 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7128 !(addr1_high == 0 && addr1_low == 0))
7129 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07007130 }
Michael Chan986e0ae2007-05-05 12:10:20 -07007131 spin_lock_bh(&tp->lock);
7132 __tg3_set_mac_addr(tp, skip_mac_1);
7133 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007134
Michael Chanb9ec6c12006-07-25 16:37:27 -07007135 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007136}
7137
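/* Program one TG3_BDINFO block in NIC SRAM: the 64-bit host DMA address
 * of the ring (high and low words), the ring length and flags, and, on
 * chips older than the 5705, the ring's location within NIC SRAM.
 */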
7138/* tp->lock is held. */
7139static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7140 dma_addr_t mapping, u32 maxlen_flags,
7141 u32 nic_addr)
7142{
7143 tg3_write_mem(tp,
7144 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7145 ((u64) mapping >> 32));
7146 tg3_write_mem(tp,
7147 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7148 ((u64) mapping & 0xffffffff));
7149 tg3_write_mem(tp,
7150 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7151 maxlen_flags);
7152
7153 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7154 tg3_write_mem(tp,
7155 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7156 nic_addr);
7157}
7158
7159static void __tg3_set_rx_mode(struct net_device *);
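/* Push the current ethtool coalescing parameters into the host
 * coalescing engine. The during-interrupt tick limits and the
 * statistics block update interval exist only on pre-5705 hardware,
 * and the statistics interval is forced to zero while the link is
 * down.
 */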
Michael Chand244c892005-07-05 14:42:33 -07007160static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07007161{
7162 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7163 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7164 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7165 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7166 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7167 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7168 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7169 }
7170 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7171 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7172 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7173 u32 val = ec->stats_block_coalesce_usecs;
7174
7175 if (!netif_carrier_ok(tp->dev))
7176 val = 0;
7177
7178 tw32(HOSTCC_STAT_COAL_TICKS, val);
7179 }
7180}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007181
7182/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007183static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007184{
7185 u32 val, rdmac_mode;
7186 int i, err, limit;
7187
7188 tg3_disable_ints(tp);
7189
7190 tg3_stop_fw(tp);
7191
7192 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7193
7194 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007195 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196 }
7197
Matt Carlsondd477002008-05-25 23:45:58 -07007198 if (reset_phy &&
7199 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007200 tg3_phy_reset(tp);
7201
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202 err = tg3_chip_reset(tp);
7203 if (err)
7204 return err;
7205
7206 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7207
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007208 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007209 val = tr32(TG3_CPMU_CTRL);
7210 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7211 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007212
7213 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7214 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7215 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7216 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7217
7218 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7219 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7220 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7221 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7222
7223 val = tr32(TG3_CPMU_HST_ACC);
7224 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7225 val |= CPMU_HST_ACC_MACCLK_6_25;
7226 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007227 }
7228
Linus Torvalds1da177e2005-04-16 15:20:36 -07007229 /* This works around an issue with Athlon chipsets on
7230 * B3 tigon3 silicon. This bit has no effect on any
7231 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007232 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007233 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007234 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7235 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7236 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7237 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007239
7240 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7241 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7242 val = tr32(TG3PCI_PCISTATE);
7243 val |= PCISTATE_RETRY_SAME_DMA;
7244 tw32(TG3PCI_PCISTATE, val);
7245 }
7246
Matt Carlson0d3031d2007-10-10 18:02:43 -07007247 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7248 /* Allow reads and writes to the
7249 * APE register and memory space.
7250 */
7251 val = tr32(TG3PCI_PCISTATE);
7252 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7253 PCISTATE_ALLOW_APE_SHMEM_WR;
7254 tw32(TG3PCI_PCISTATE, val);
7255 }
7256
Linus Torvalds1da177e2005-04-16 15:20:36 -07007257 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7258 /* Enable some hw fixes. */
7259 val = tr32(TG3PCI_MSI_DATA);
7260 val |= (1 << 26) | (1 << 28) | (1 << 29);
7261 tw32(TG3PCI_MSI_DATA, val);
7262 }
7263
7264 /* Descriptor ring init may make accesses to the
7265 * NIC SRAM area to setup the TX descriptors, so we
7266 * can only do this after the hardware has been
7267 * successfully reset.
7268 */
Michael Chan32d8c572006-07-25 16:38:29 -07007269 err = tg3_init_rings(tp);
7270 if (err)
7271 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007272
Matt Carlson9936bcf2007-10-10 18:03:07 -07007273 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlsonfcb389d2008-11-03 16:55:44 -08007274 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007275 /* This value is determined during the probe time DMA
7276 * engine test, tg3_test_dma.
7277 */
7278 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7279 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007280
7281 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7282 GRC_MODE_4X_NIC_SEND_RINGS |
7283 GRC_MODE_NO_TX_PHDR_CSUM |
7284 GRC_MODE_NO_RX_PHDR_CSUM);
7285 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007286
7287 /* Pseudo-header checksum is done by hardware logic and not
7288	 * the offload processors, so make the chip do the pseudo-
7289 * header checksums on receive. For transmit it is more
7290 * convenient to do the pseudo-header checksum in software
7291 * as Linux does that on transmit for us in all cases.
7292 */
7293 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007294
7295 tw32(GRC_MODE,
7296 tp->grc_mode |
7297 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7298
7299	/* Set up the timer prescaler register. The clock is always 66 MHz. */
7300 val = tr32(GRC_MISC_CFG);
7301 val &= ~0xff;
7302 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7303 tw32(GRC_MISC_CFG, val);
7304
7305 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007306 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007307 /* Do nothing. */
7308 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7309 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7311 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7312 else
7313 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7314 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7315 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7316 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007317 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7318 int fw_len;
7319
7320 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7321 TG3_TSO5_FW_RODATA_LEN +
7322 TG3_TSO5_FW_DATA_LEN +
7323 TG3_TSO5_FW_SBSS_LEN +
7324 TG3_TSO5_FW_BSS_LEN);
7325 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7326 tw32(BUFMGR_MB_POOL_ADDR,
7327 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7328 tw32(BUFMGR_MB_POOL_SIZE,
7329 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7330 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007331
Michael Chan0f893dc2005-07-25 12:30:38 -07007332 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007333 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7334 tp->bufmgr_config.mbuf_read_dma_low_water);
7335 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7336 tp->bufmgr_config.mbuf_mac_rx_low_water);
7337 tw32(BUFMGR_MB_HIGH_WATER,
7338 tp->bufmgr_config.mbuf_high_water);
7339 } else {
7340 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7341 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7342 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7343 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7344 tw32(BUFMGR_MB_HIGH_WATER,
7345 tp->bufmgr_config.mbuf_high_water_jumbo);
7346 }
7347 tw32(BUFMGR_DMA_LOW_WATER,
7348 tp->bufmgr_config.dma_low_water);
7349 tw32(BUFMGR_DMA_HIGH_WATER,
7350 tp->bufmgr_config.dma_high_water);
7351
7352 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7353 for (i = 0; i < 2000; i++) {
7354 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7355 break;
7356 udelay(10);
7357 }
7358 if (i >= 2000) {
7359 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7360 tp->dev->name);
7361 return -ENODEV;
7362 }
7363
7364 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007365 val = tp->rx_pending / 8;
7366 if (val == 0)
7367 val = 1;
7368 else if (val > tp->rx_std_max_post)
7369 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007370 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7371 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7372 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7373
7374 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7375 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7376 }
Michael Chanf92905d2006-06-29 20:14:29 -07007377
7378 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007379
7380 /* Initialize TG3_BDINFO's at:
7381 * RCVDBDI_STD_BD: standard eth size rx ring
7382 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7383 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7384 *
7385 * like so:
7386 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7387 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7388 * ring attribute flags
7389 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7390 *
7391 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7392 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7393 *
7394 * The size of each ring is fixed in the firmware, but the location is
7395 * configurable.
7396 */
7397 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7398 ((u64) tp->rx_std_mapping >> 32));
7399 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7400 ((u64) tp->rx_std_mapping & 0xffffffff));
7401 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7402 NIC_SRAM_RX_BUFFER_DESC);
7403
7404 /* Don't even try to program the JUMBO/MINI buffer descriptor
7405 * configs on 5705.
7406 */
7407 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7408 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7409 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7410 } else {
7411 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7412 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7413
7414 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7415 BDINFO_FLAGS_DISABLED);
7416
7417 /* Setup replenish threshold. */
7418 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7419
Michael Chan0f893dc2005-07-25 12:30:38 -07007420 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007421 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7422 ((u64) tp->rx_jumbo_mapping >> 32));
7423 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7424 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7425 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7426 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7427 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7428 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7429 } else {
7430 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7431 BDINFO_FLAGS_DISABLED);
7432 }
7433
7434 }
7435
7436 /* There is only one send ring on 5705/5750, no need to explicitly
7437 * disable the others.
7438 */
7439 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7440 /* Clear out send RCB ring in SRAM. */
7441 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7442 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7443 BDINFO_FLAGS_DISABLED);
7444 }
7445
7446 tp->tx_prod = 0;
7447 tp->tx_cons = 0;
7448 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7449 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7450
7451 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7452 tp->tx_desc_mapping,
7453 (TG3_TX_RING_SIZE <<
7454 BDINFO_FLAGS_MAXLEN_SHIFT),
7455 NIC_SRAM_TX_BUFFER_DESC);
7456
7457 /* There is only one receive return ring on 5705/5750, no need
7458 * to explicitly disable the others.
7459 */
7460 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7461 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7462 i += TG3_BDINFO_SIZE) {
7463 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7464 BDINFO_FLAGS_DISABLED);
7465 }
7466 }
7467
7468 tp->rx_rcb_ptr = 0;
7469 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7470
7471 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7472 tp->rx_rcb_mapping,
7473 (TG3_RX_RCB_RING_SIZE(tp) <<
7474 BDINFO_FLAGS_MAXLEN_SHIFT),
7475 0);
7476
7477 tp->rx_std_ptr = tp->rx_pending;
7478 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7479 tp->rx_std_ptr);
7480
Michael Chan0f893dc2005-07-25 12:30:38 -07007481 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007482 tp->rx_jumbo_pending : 0;
7483 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7484 tp->rx_jumbo_ptr);
7485
7486 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007487 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007488
7489 /* MTU + ethernet header + FCS + optional VLAN tag */
7490 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7491
7492 /* The slot time is changed by tg3_setup_phy if we
7493 * run at gigabit with half duplex.
7494 */
7495 tw32(MAC_TX_LENGTHS,
7496 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7497 (6 << TX_LENGTHS_IPG_SHIFT) |
7498 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7499
7500 /* Receive rules. */
7501 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7502 tw32(RCVLPC_CONFIG, 0x0181);
7503
7504	/* Calculate RDMAC_MODE setting early; we need it to determine
7505 * the RCVLPC_STATE_ENABLE mask.
7506 */
7507 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7508 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7509 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7510 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7511 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007512
Matt Carlson57e69832008-05-25 23:48:31 -07007513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007515 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7516 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7517 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7518
Michael Chan85e94ce2005-04-21 17:05:28 -07007519 /* If statement applies to 5705 and 5750 PCI devices only */
7520 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7521 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7522 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007523 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007525 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7526 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7527 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7528 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7529 }
7530 }
7531
Michael Chan85e94ce2005-04-21 17:05:28 -07007532 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7533 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7534
Linus Torvalds1da177e2005-04-16 15:20:36 -07007535 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7536 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007537
7538 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007539 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7540 val = tr32(RCVLPC_STATS_ENABLE);
7541 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7542 tw32(RCVLPC_STATS_ENABLE, val);
7543 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7544 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545 val = tr32(RCVLPC_STATS_ENABLE);
7546 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7547 tw32(RCVLPC_STATS_ENABLE, val);
7548 } else {
7549 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7550 }
7551 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7552 tw32(SNDDATAI_STATSENAB, 0xffffff);
7553 tw32(SNDDATAI_STATSCTRL,
7554 (SNDDATAI_SCTRL_ENABLE |
7555 SNDDATAI_SCTRL_FASTUPD));
7556
7557 /* Setup host coalescing engine. */
7558 tw32(HOSTCC_MODE, 0);
7559 for (i = 0; i < 2000; i++) {
7560 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7561 break;
7562 udelay(10);
7563 }
7564
Michael Chand244c892005-07-05 14:42:33 -07007565 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007566
7567 /* set status block DMA address */
7568 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7569 ((u64) tp->status_mapping >> 32));
7570 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7571 ((u64) tp->status_mapping & 0xffffffff));
7572
7573 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7574 /* Status/statistics block address. See tg3_timer,
7575 * the tg3_periodic_fetch_stats call there, and
7576 * tg3_get_stats to see how this works for 5705/5750 chips.
7577 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7579 ((u64) tp->stats_mapping >> 32));
7580 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7581 ((u64) tp->stats_mapping & 0xffffffff));
7582 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7583 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7584 }
7585
7586 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7587
7588 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7589 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7590 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7591 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7592
7593 /* Clear statistics/status block in chip, and status block in ram. */
7594 for (i = NIC_SRAM_STATS_BLK;
7595 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7596 i += sizeof(u32)) {
7597 tg3_write_mem(tp, i, 0);
7598 udelay(40);
7599 }
7600 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7601
Michael Chanc94e3942005-09-27 12:12:42 -07007602 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7603 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7604 /* reset to prevent losing 1st rx packet intermittently */
7605 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7606 udelay(10);
7607 }
7608
Matt Carlson3bda1252008-08-15 14:08:22 -07007609 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7610 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7611 else
7612 tp->mac_mode = 0;
7613 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007614 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007615 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7616 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7617 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7618 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007619 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7620 udelay(40);
7621
Michael Chan314fba32005-04-21 17:07:04 -07007622 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007623 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007624 * register to preserve the GPIO settings for LOMs. The GPIOs,
7625 * whether used as inputs or outputs, are set by boot code after
7626 * reset.
7627 */
Michael Chan9d26e212006-12-07 00:21:14 -08007628 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007629 u32 gpio_mask;
7630
Michael Chan9d26e212006-12-07 00:21:14 -08007631 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7632 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7633 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007634
7635 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7636 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7637 GRC_LCLCTRL_GPIO_OUTPUT3;
7638
Michael Chanaf36e6b2006-03-23 01:28:06 -08007639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7640 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7641
Gary Zambranoaaf84462007-05-05 11:51:45 -07007642 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007643 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7644
7645 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007646 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7647 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7648 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007649 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007650 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7651 udelay(100);
7652
Michael Chan09ee9292005-08-09 20:17:00 -07007653 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007654 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007655
7656 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7657 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7658 udelay(40);
7659 }
7660
7661 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7662 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7663 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7664 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7665 WDMAC_MODE_LNGREAD_ENAB);
7666
Michael Chan85e94ce2005-04-21 17:05:28 -07007667 /* If statement applies to 5705 and 5750 PCI devices only */
7668 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7669 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007671		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7672 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7673 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7674 /* nothing */
7675 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7676 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7677 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7678 val |= WDMAC_MODE_RX_ACCEL;
7679 }
7680 }
7681
Michael Chand9ab5ad2006-03-20 22:27:35 -08007682 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007683 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007684 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007685 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007686 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7687 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007688 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007689
Linus Torvalds1da177e2005-04-16 15:20:36 -07007690 tw32_f(WDMAC_MODE, val);
7691 udelay(40);
7692
Matt Carlson9974a352007-10-07 23:27:28 -07007693 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7694 u16 pcix_cmd;
7695
7696 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7697 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007699 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7700 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007701 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007702 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7703 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007704 }
Matt Carlson9974a352007-10-07 23:27:28 -07007705 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7706 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007707 }
7708
7709 tw32_f(RDMAC_MODE, rdmac_mode);
7710 udelay(40);
7711
7712 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7713 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7714 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007715
7716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7717 tw32(SNDDATAC_MODE,
7718 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7719 else
7720 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7721
Linus Torvalds1da177e2005-04-16 15:20:36 -07007722 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7723 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7724 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7725 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007726 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7727 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007728 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7729 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7730
7731 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7732 err = tg3_load_5701_a0_firmware_fix(tp);
7733 if (err)
7734 return err;
7735 }
7736
Linus Torvalds1da177e2005-04-16 15:20:36 -07007737 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7738 err = tg3_load_tso_firmware(tp);
7739 if (err)
7740 return err;
7741 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007742
7743 tp->tx_mode = TX_MODE_ENABLE;
7744 tw32_f(MAC_TX_MODE, tp->tx_mode);
7745 udelay(100);
7746
7747 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007748 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007749 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7750 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7751 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007752 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7753
Linus Torvalds1da177e2005-04-16 15:20:36 -07007754 tw32_f(MAC_RX_MODE, tp->rx_mode);
7755 udelay(10);
7756
Linus Torvalds1da177e2005-04-16 15:20:36 -07007757 tw32(MAC_LED_CTRL, tp->led_ctrl);
7758
7759 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007760 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007761 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7762 udelay(10);
7763 }
7764 tw32_f(MAC_RX_MODE, tp->rx_mode);
7765 udelay(10);
7766
7767 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7768 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7769 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7770 /* Set drive transmission level to 1.2V */
7771 /* only if the signal pre-emphasis bit is not set */
7772 val = tr32(MAC_SERDES_CFG);
7773 val &= 0xfffff000;
7774 val |= 0x880;
7775 tw32(MAC_SERDES_CFG, val);
7776 }
7777 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7778 tw32(MAC_SERDES_CFG, 0x616000);
7779 }
7780
7781 /* Prevent chip from dropping frames when flow control
7782 * is enabled.
7783 */
7784 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7785
7786 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7787 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7788 /* Use hardware link auto-negotiation */
7789 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7790 }
7791
Michael Chand4d2c552006-03-20 17:47:20 -08007792 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7793 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7794 u32 tmp;
7795
7796 tmp = tr32(SERDES_RX_CTRL);
7797 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7798 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7799 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7800 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7801 }
7802
Matt Carlsondd477002008-05-25 23:45:58 -07007803 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7804 if (tp->link_config.phy_is_low_power) {
7805 tp->link_config.phy_is_low_power = 0;
7806 tp->link_config.speed = tp->link_config.orig_speed;
7807 tp->link_config.duplex = tp->link_config.orig_duplex;
7808 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007810
Matt Carlsondd477002008-05-25 23:45:58 -07007811 err = tg3_setup_phy(tp, 0);
7812 if (err)
7813 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007814
Matt Carlsondd477002008-05-25 23:45:58 -07007815 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7816 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7817 u32 tmp;
7818
7819 /* Clear CRC stats. */
7820 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7821 tg3_writephy(tp, MII_TG3_TEST1,
7822 tmp | MII_TG3_TEST1_CRC_EN);
7823 tg3_readphy(tp, 0x14, &tmp);
7824 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007825 }
7826 }
7827
7828 __tg3_set_rx_mode(tp->dev);
7829
7830 /* Initialize receive rules. */
7831 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7832 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7833 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7834 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7835
Michael Chan4cf78e42005-07-25 12:29:19 -07007836 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007837 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007838 limit = 8;
7839 else
7840 limit = 16;
7841 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7842 limit -= 4;
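	/* The cases below intentionally fall through so that receive
	 * rules (limit - 1) down to 4 are all cleared; rules 3 and 2
	 * are deliberately left alone (note the commented-out writes)
	 * and rules 0 and 1 were programmed above.
	 */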
7843 switch (limit) {
7844 case 16:
7845 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7846 case 15:
7847 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7848 case 14:
7849 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7850 case 13:
7851 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7852 case 12:
7853 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7854 case 11:
7855 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7856 case 10:
7857 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7858 case 9:
7859 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7860 case 8:
7861 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7862 case 7:
7863 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7864 case 6:
7865 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7866 case 5:
7867 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7868 case 4:
7869 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7870 case 3:
7871 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7872 case 2:
7873 case 1:
7874
7875 default:
7876 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007877 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007878
Matt Carlson9ce768e2007-10-11 19:49:11 -07007879 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7880 /* Write our heartbeat update interval to APE. */
7881 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7882 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007883
Linus Torvalds1da177e2005-04-16 15:20:36 -07007884 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7885
Linus Torvalds1da177e2005-04-16 15:20:36 -07007886 return 0;
7887}
7888
7889/* Called at device open time to get the chip ready for
7890 * packet processing. Invoked with tp->lock held.
7891 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007892static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007893{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007894 tg3_switch_clocks(tp);
7895
7896 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7897
Matt Carlson2f751b62008-08-04 23:17:34 -07007898 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007899}
7900
7901#define TG3_STAT_ADD32(PSTAT, REG) \
7902do { u32 __val = tr32(REG); \
7903 (PSTAT)->low += __val; \
7904 if ((PSTAT)->low < __val) \
7905 (PSTAT)->high += 1; \
7906} while (0)
7907
7908static void tg3_periodic_fetch_stats(struct tg3 *tp)
7909{
7910 struct tg3_hw_stats *sp = tp->hw_stats;
7911
7912 if (!netif_carrier_ok(tp->dev))
7913 return;
7914
7915 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7916 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7917 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7918 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7919 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7920 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7921 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7922 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7923 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7924 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7925 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7926 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7927 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7928
7929 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7930 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7931 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7932 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7933 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7934 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7935 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7936 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7937 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7938 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7939 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7940 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7941 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7942 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007943
7944 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7945 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7946 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947}
7948
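/* Driver watchdog, run off a kernel timer. On chips without tagged
 * interrupt status it works around status-block races on every tick;
 * once per second it polls link state (or does serdes parallel
 * detection) and fetches statistics on 5705 and newer parts; and on a
 * longer interval it sends the ASF heartbeat so the firmware knows the
 * host is still alive.
 */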
7949static void tg3_timer(unsigned long __opaque)
7950{
7951 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007952
Michael Chanf475f162006-03-27 23:20:14 -08007953 if (tp->irq_sync)
7954 goto restart_timer;
7955
David S. Millerf47c11e2005-06-24 20:18:35 -07007956 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007957
David S. Millerfac9b832005-05-18 22:46:34 -07007958 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7959 /* All of this garbage is because when using non-tagged
7960		 * IRQ status, the mailbox/status_block protocol the chip
7961 * uses with the cpu is race prone.
7962 */
7963 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7964 tw32(GRC_LOCAL_CTRL,
7965 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7966 } else {
7967 tw32(HOSTCC_MODE, tp->coalesce_mode |
7968 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7969 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007970
David S. Millerfac9b832005-05-18 22:46:34 -07007971 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7972 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007973 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007974 schedule_work(&tp->reset_task);
7975 return;
7976 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007977 }
7978
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979 /* This part only runs once per second. */
7980 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007981 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7982 tg3_periodic_fetch_stats(tp);
7983
Linus Torvalds1da177e2005-04-16 15:20:36 -07007984 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7985 u32 mac_stat;
7986 int phy_event;
7987
7988 mac_stat = tr32(MAC_STATUS);
7989
7990 phy_event = 0;
7991 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7992 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7993 phy_event = 1;
7994 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7995 phy_event = 1;
7996
7997 if (phy_event)
7998 tg3_setup_phy(tp, 0);
7999 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8000 u32 mac_stat = tr32(MAC_STATUS);
8001 int need_setup = 0;
8002
8003 if (netif_carrier_ok(tp->dev) &&
8004 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8005 need_setup = 1;
8006 }
8007			if (!netif_carrier_ok(tp->dev) &&
8008 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8009 MAC_STATUS_SIGNAL_DET))) {
8010 need_setup = 1;
8011 }
8012 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07008013 if (!tp->serdes_counter) {
8014 tw32_f(MAC_MODE,
8015 (tp->mac_mode &
8016 ~MAC_MODE_PORT_MODE_MASK));
8017 udelay(40);
8018 tw32_f(MAC_MODE, tp->mac_mode);
8019 udelay(40);
8020 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008021 tg3_setup_phy(tp, 0);
8022 }
Michael Chan747e8f82005-07-25 12:33:22 -07008023 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8024 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008025
8026 tp->timer_counter = tp->timer_multiplier;
8027 }
8028
Michael Chan130b8e42006-09-27 16:00:40 -07008029 /* Heartbeat is only sent once every 2 seconds.
8030 *
8031 * The heartbeat is to tell the ASF firmware that the host
8032 * driver is still alive. In the event that the OS crashes,
8033 * ASF needs to reset the hardware to free up the FIFO space
8034 * that may be filled with rx packets destined for the host.
8035 * If the FIFO is full, ASF will no longer function properly.
8036 *
8037 * Unintended resets have been reported on real time kernels
8038	 * Unintended resets have been reported on real-time kernels
8039	 * where the timer doesn't run on time. Netpoll will also have
8040	 * the same problem.
8041 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8042 * to check the ring condition when the heartbeat is expiring
8043 * before doing the reset. This will prevent most unintended
8044 * resets.
8045 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008046 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07008047 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8048 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07008049 tg3_wait_for_event_ack(tp);
8050
Michael Chanbbadf502006-04-06 21:46:34 -07008051 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07008052 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07008053 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07008054 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07008055 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07008056
8057 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008058 }
8059 tp->asf_counter = tp->asf_multiplier;
8060 }
8061
David S. Millerf47c11e2005-06-24 20:18:35 -07008062 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008063
Michael Chanf475f162006-03-27 23:20:14 -08008064restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07008065 tp->timer.expires = jiffies + tp->timer_offset;
8066 add_timer(&tp->timer);
8067}
8068
Adrian Bunk81789ef2006-03-20 23:00:14 -08008069static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08008070{
David Howells7d12e782006-10-05 14:55:46 +01008071 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008072 unsigned long flags;
8073 struct net_device *dev = tp->dev;
8074
8075 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8076 fn = tg3_msi;
8077 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8078 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008079 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008080 } else {
8081 fn = tg3_interrupt;
8082 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8083 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008084 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008085 }
8086 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8087}
8088
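/* Temporarily install a minimal test ISR, force an interrupt via
 * HOSTCC_MODE_NOW, and poll for up to ~50 ms for evidence that it was
 * delivered (interrupt mailbox updated or the PCI interrupt masked),
 * then restore the normal interrupt handler.
 */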
Michael Chan79381092005-04-21 17:13:59 -07008089static int tg3_test_interrupt(struct tg3 *tp)
8090{
8091 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07008092 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07008093
Michael Chand4bc3922005-05-29 14:59:20 -07008094 if (!netif_running(dev))
8095 return -ENODEV;
8096
Michael Chan79381092005-04-21 17:13:59 -07008097 tg3_disable_ints(tp);
8098
8099 free_irq(tp->pdev->irq, dev);
8100
8101 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008102 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07008103 if (err)
8104 return err;
8105
Michael Chan38f38432005-09-05 17:53:32 -07008106 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07008107 tg3_enable_ints(tp);
8108
8109 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8110 HOSTCC_MODE_NOW);
8111
8112 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07008113 u32 int_mbox, misc_host_ctrl;
8114
Michael Chan09ee9292005-08-09 20:17:00 -07008115 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8116 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07008117 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8118
8119 if ((int_mbox != 0) ||
8120 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8121 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07008122 break;
Michael Chanb16250e2006-09-27 16:10:14 -07008123 }
8124
Michael Chan79381092005-04-21 17:13:59 -07008125 msleep(10);
8126 }
8127
8128 tg3_disable_ints(tp);
8129
8130 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008131
Michael Chanfcfa0a32006-03-20 22:28:41 -08008132 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008133
8134 if (err)
8135 return err;
8136
Michael Chanb16250e2006-09-27 16:10:14 -07008137 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07008138 return 0;
8139
8140 return -EIO;
8141}
8142
8143/* Returns 0 if the MSI test succeeds, or if it fails and INTx mode is
8144 * successfully restored.
8145 */
8146static int tg3_test_msi(struct tg3 *tp)
8147{
8148 struct net_device *dev = tp->dev;
8149 int err;
8150 u16 pci_cmd;
8151
8152 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8153 return 0;
8154
8155 /* Turn off SERR reporting in case MSI terminates with Master
8156 * Abort.
8157 */
8158 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8159 pci_write_config_word(tp->pdev, PCI_COMMAND,
8160 pci_cmd & ~PCI_COMMAND_SERR);
8161
8162 err = tg3_test_interrupt(tp);
8163
8164 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8165
8166 if (!err)
8167 return 0;
8168
8169 /* other failures */
8170 if (err != -EIO)
8171 return err;
8172
8173 /* MSI test failed, go back to INTx mode */
8174 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8175 "switching to INTx mode. Please report this failure to "
8176 "the PCI maintainer and include system chipset information.\n",
8177 tp->dev->name);
8178
8179 free_irq(tp->pdev->irq, dev);
8180 pci_disable_msi(tp->pdev);
8181
8182 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8183
Michael Chanfcfa0a32006-03-20 22:28:41 -08008184 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008185 if (err)
8186 return err;
8187
8188 /* Need to reset the chip because the MSI cycle may have terminated
8189 * with Master Abort.
8190 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008191 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008192
Michael Chan944d9802005-05-29 14:57:48 -07008193 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008194 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008195
David S. Millerf47c11e2005-06-24 20:18:35 -07008196 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008197
8198 if (err)
8199 free_irq(tp->pdev->irq, dev);
8200
8201 return err;
8202}
8203
Linus Torvalds1da177e2005-04-16 15:20:36 -07008204static int tg3_open(struct net_device *dev)
8205{
8206 struct tg3 *tp = netdev_priv(dev);
8207 int err;
8208
Michael Chanc49a1562006-12-17 17:07:29 -08008209 netif_carrier_off(tp->dev);
8210
Michael Chanbc1c7562006-03-20 17:48:03 -08008211 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008212 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008213 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008214
8215 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008216
Linus Torvalds1da177e2005-04-16 15:20:36 -07008217 tg3_disable_ints(tp);
8218 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8219
David S. Millerf47c11e2005-06-24 20:18:35 -07008220 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008221
8222 /* The placement of this call is tied
8223 * to the setup and use of Host TX descriptors.
8224 */
8225 err = tg3_alloc_consistent(tp);
8226 if (err)
8227 return err;
8228
Michael Chan7544b092007-05-05 13:08:32 -07008229 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008230		/* All MSI-supporting chips should support tagged
8231 * status. Assert that this is the case.
8232 */
8233 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8234 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8235 "Not using MSI.\n", tp->dev->name);
8236 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008237 u32 msi_mode;
8238
8239 msi_mode = tr32(MSGINT_MODE);
8240 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8241 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8242 }
8243 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008244 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008245
8246 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008247 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8248 pci_disable_msi(tp->pdev);
8249 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8250 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008251 tg3_free_consistent(tp);
8252 return err;
8253 }
8254
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008255 napi_enable(&tp->napi);
8256
David S. Millerf47c11e2005-06-24 20:18:35 -07008257 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008258
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008259 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008260 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008261 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008262 tg3_free_rings(tp);
8263 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008264 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8265 tp->timer_offset = HZ;
8266 else
8267 tp->timer_offset = HZ / 10;
8268
8269 BUG_ON(tp->timer_offset > HZ);
8270 tp->timer_counter = tp->timer_multiplier =
8271 (HZ / tp->timer_offset);
8272 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008273 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008274
8275 init_timer(&tp->timer);
8276 tp->timer.expires = jiffies + tp->timer_offset;
8277 tp->timer.data = (unsigned long) tp;
8278 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008279 }
8280
David S. Millerf47c11e2005-06-24 20:18:35 -07008281 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008282
8283 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008284 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008285 free_irq(tp->pdev->irq, dev);
8286 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8287 pci_disable_msi(tp->pdev);
8288 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8289 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008290 tg3_free_consistent(tp);
8291 return err;
8292 }
8293
Michael Chan79381092005-04-21 17:13:59 -07008294 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8295 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008296
Michael Chan79381092005-04-21 17:13:59 -07008297 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008298 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008299
8300 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8301 pci_disable_msi(tp->pdev);
8302 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8303 }
Michael Chan944d9802005-05-29 14:57:48 -07008304 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008305 tg3_free_rings(tp);
8306 tg3_free_consistent(tp);
8307
David S. Millerf47c11e2005-06-24 20:18:35 -07008308 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008309
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008310 napi_disable(&tp->napi);
8311
Michael Chan79381092005-04-21 17:13:59 -07008312 return err;
8313 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008314
8315 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8316 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008317 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008318
Michael Chanb5d37722006-09-27 16:06:21 -07008319 tw32(PCIE_TRANSACTION_CFG,
8320 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008321 }
8322 }
Michael Chan79381092005-04-21 17:13:59 -07008323 }
8324
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008325 tg3_phy_start(tp);
8326
David S. Millerf47c11e2005-06-24 20:18:35 -07008327 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008328
Michael Chan79381092005-04-21 17:13:59 -07008329 add_timer(&tp->timer);
8330 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008331 tg3_enable_ints(tp);
8332
David S. Millerf47c11e2005-06-24 20:18:35 -07008333 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008334
8335 netif_start_queue(dev);
8336
8337 return 0;
8338}
8339
8340#if 0
8341/*static*/ void tg3_dump_state(struct tg3 *tp)
8342{
8343 u32 val32, val32_2, val32_3, val32_4, val32_5;
8344 u16 val16;
8345 int i;
8346
8347 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8348 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8349 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8350 val16, val32);
8351
8352 /* MAC block */
8353 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8354 tr32(MAC_MODE), tr32(MAC_STATUS));
8355 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8356 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8357 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8358 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8359 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8360 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8361
8362 /* Send data initiator control block */
8363 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8364 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8365 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8366 tr32(SNDDATAI_STATSCTRL));
8367
8368 /* Send data completion control block */
8369 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8370
8371 /* Send BD ring selector block */
8372 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8373 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8374
8375 /* Send BD initiator control block */
8376 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8377 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8378
8379 /* Send BD completion control block */
8380 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8381
8382 /* Receive list placement control block */
8383 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8384 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8385 printk(" RCVLPC_STATSCTRL[%08x]\n",
8386 tr32(RCVLPC_STATSCTRL));
8387
8388 /* Receive data and receive BD initiator control block */
8389 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8390 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8391
8392 /* Receive data completion control block */
8393 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8394 tr32(RCVDCC_MODE));
8395
8396 /* Receive BD initiator control block */
8397 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8398 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8399
8400 /* Receive BD completion control block */
8401 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8402 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8403
8404 /* Receive list selector control block */
8405 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8406 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8407
8408 /* Mbuf cluster free block */
8409 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8410 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8411
8412 /* Host coalescing control block */
8413 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8414 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8415 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8416 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8417 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8418 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8419 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8420 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8421 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8422 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8423 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8424 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8425
8426 /* Memory arbiter control block */
8427 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8428 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8429
8430 /* Buffer manager control block */
8431 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8432 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8433 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8434 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8435 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8436 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8437 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8438 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8439
8440 /* Read DMA control block */
8441 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8442 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8443
8444 /* Write DMA control block */
8445 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8446 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8447
8448 /* DMA completion block */
8449 printk("DEBUG: DMAC_MODE[%08x]\n",
8450 tr32(DMAC_MODE));
8451
8452 /* GRC block */
8453 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8454 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8455 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8456 tr32(GRC_LOCAL_CTRL));
8457
8458 /* TG3_BDINFOs */
8459 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8460 tr32(RCVDBDI_JUMBO_BD + 0x0),
8461 tr32(RCVDBDI_JUMBO_BD + 0x4),
8462 tr32(RCVDBDI_JUMBO_BD + 0x8),
8463 tr32(RCVDBDI_JUMBO_BD + 0xc));
8464 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8465 tr32(RCVDBDI_STD_BD + 0x0),
8466 tr32(RCVDBDI_STD_BD + 0x4),
8467 tr32(RCVDBDI_STD_BD + 0x8),
8468 tr32(RCVDBDI_STD_BD + 0xc));
8469 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8470 tr32(RCVDBDI_MINI_BD + 0x0),
8471 tr32(RCVDBDI_MINI_BD + 0x4),
8472 tr32(RCVDBDI_MINI_BD + 0x8),
8473 tr32(RCVDBDI_MINI_BD + 0xc));
8474
8475 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8476 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8477 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8478 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8479 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8480 val32, val32_2, val32_3, val32_4);
8481
8482 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8483 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8484 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8485 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8486 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8487 val32, val32_2, val32_3, val32_4);
8488
8489 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8490 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8491 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8492 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8493 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8494 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8495 val32, val32_2, val32_3, val32_4, val32_5);
8496
8497 /* SW status block */
8498 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8499 tp->hw_status->status,
8500 tp->hw_status->status_tag,
8501 tp->hw_status->rx_jumbo_consumer,
8502 tp->hw_status->rx_consumer,
8503 tp->hw_status->rx_mini_consumer,
8504 tp->hw_status->idx[0].rx_producer,
8505 tp->hw_status->idx[0].tx_consumer);
8506
8507 /* SW statistics block */
8508 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8509 ((u32 *)tp->hw_stats)[0],
8510 ((u32 *)tp->hw_stats)[1],
8511 ((u32 *)tp->hw_stats)[2],
8512 ((u32 *)tp->hw_stats)[3]);
8513
8514 /* Mailboxes */
8515 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008516 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8517 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8518 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8519 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008520
8521 /* NIC side send descriptors. */
8522 for (i = 0; i < 6; i++) {
8523 unsigned long txd;
8524
8525 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8526 + (i * sizeof(struct tg3_tx_buffer_desc));
8527 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8528 i,
8529 readl(txd + 0x0), readl(txd + 0x4),
8530 readl(txd + 0x8), readl(txd + 0xc));
8531 }
8532
8533 /* NIC side RX descriptors. */
8534 for (i = 0; i < 6; i++) {
8535 unsigned long rxd;
8536
8537 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8538 + (i * sizeof(struct tg3_rx_buffer_desc));
8539 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8540 i,
8541 readl(rxd + 0x0), readl(rxd + 0x4),
8542 readl(rxd + 0x8), readl(rxd + 0xc));
8543 rxd += (4 * sizeof(u32));
8544 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8545 i,
8546 readl(rxd + 0x0), readl(rxd + 0x4),
8547 readl(rxd + 0x8), readl(rxd + 0xc));
8548 }
8549
8550 for (i = 0; i < 6; i++) {
8551 unsigned long rxd;
8552
8553 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8554 + (i * sizeof(struct tg3_rx_buffer_desc));
8555 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8556 i,
8557 readl(rxd + 0x0), readl(rxd + 0x4),
8558 readl(rxd + 0x8), readl(rxd + 0xc));
8559 rxd += (4 * sizeof(u32));
8560 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8561 i,
8562 readl(rxd + 0x0), readl(rxd + 0x4),
8563 readl(rxd + 0x8), readl(rxd + 0xc));
8564 }
8565}
8566#endif
8567
8568static struct net_device_stats *tg3_get_stats(struct net_device *);
8569static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8570
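/* tg3_close(): stop NAPI and the reset task, halt the chip, release the
 * rings, the IRQ and any MSI vector, snapshot the running counters into
 * net_stats_prev/estats_prev (via the accessors declared above) before
 * the DMA memory is freed, and finally put the device into D3hot.
 */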
8571static int tg3_close(struct net_device *dev)
8572{
8573 struct tg3 *tp = netdev_priv(dev);
8574
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008575 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008576 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008577
Linus Torvalds1da177e2005-04-16 15:20:36 -07008578 netif_stop_queue(dev);
8579
8580 del_timer_sync(&tp->timer);
8581
David S. Millerf47c11e2005-06-24 20:18:35 -07008582 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008583#if 0
8584 tg3_dump_state(tp);
8585#endif
8586
8587 tg3_disable_ints(tp);
8588
Michael Chan944d9802005-05-29 14:57:48 -07008589 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008590 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008591 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008592
David S. Millerf47c11e2005-06-24 20:18:35 -07008593 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008594
Michael Chan88b06bc2005-04-21 17:13:25 -07008595 free_irq(tp->pdev->irq, dev);
8596 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8597 pci_disable_msi(tp->pdev);
8598 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8599 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008600
8601 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8602 sizeof(tp->net_stats_prev));
8603 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8604 sizeof(tp->estats_prev));
8605
8606 tg3_free_consistent(tp);
8607
Michael Chanbc1c7562006-03-20 17:48:03 -08008608 tg3_set_power_state(tp, PCI_D3hot);
8609
8610 netif_carrier_off(tp->dev);
8611
Linus Torvalds1da177e2005-04-16 15:20:36 -07008612 return 0;
8613}
8614
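/* Hardware statistics are 64-bit counters split into 32-bit high/low
 * words.  get_stat64() feeds struct net_device_stats, so on a 32-bit
 * kernel only the low word is returned; get_estat64() below is used for
 * the ethtool statistics and always returns the full 64 bits.
 */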
8615static inline unsigned long get_stat64(tg3_stat64_t *val)
8616{
8617 unsigned long ret;
8618
8619#if (BITS_PER_LONG == 32)
8620 ret = val->low;
8621#else
8622 ret = ((u64)val->high << 32) | ((u64)val->low);
8623#endif
8624 return ret;
8625}
8626
Stefan Buehler816f8b82008-08-15 14:10:54 -07008627static inline u64 get_estat64(tg3_stat64_t *val)
8628{
8629 return ((u64)val->high << 32) | ((u64)val->low);
8630}
8631
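/* On non-SerDes 5700/5701 chips the CRC error count is read from the
 * PHY: MII_TG3_TEST1_CRC_EN enables the counter and register 0x14 holds
 * the count, which is accumulated in tp->phy_crc_errors.  All other
 * chips use the MAC's rx_fcs_errors statistic directly.
 */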
Linus Torvalds1da177e2005-04-16 15:20:36 -07008632static unsigned long calc_crc_errors(struct tg3 *tp)
8633{
8634 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8635
8636 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8637 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008639 u32 val;
8640
David S. Millerf47c11e2005-06-24 20:18:35 -07008641 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008642 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8643 tg3_writephy(tp, MII_TG3_TEST1,
8644 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008645 tg3_readphy(tp, 0x14, &val);
8646 } else
8647 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008648 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008649
8650 tp->phy_crc_errors += val;
8651
8652 return tp->phy_crc_errors;
8653 }
8654
8655 return get_stat64(&hw_stats->rx_fcs_errors);
8656}
8657
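/* ESTAT_ADD() adds the live 64-bit hardware counter to the value saved
 * in estats_prev at the last tg3_close(), so the totals reported to
 * ethtool keep accumulating across chip resets and down/up cycles.
 */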
8658#define ESTAT_ADD(member) \
8659 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008660 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008661
8662static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8663{
8664 struct tg3_ethtool_stats *estats = &tp->estats;
8665 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8666 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8667
8668 if (!hw_stats)
8669 return old_estats;
8670
8671 ESTAT_ADD(rx_octets);
8672 ESTAT_ADD(rx_fragments);
8673 ESTAT_ADD(rx_ucast_packets);
8674 ESTAT_ADD(rx_mcast_packets);
8675 ESTAT_ADD(rx_bcast_packets);
8676 ESTAT_ADD(rx_fcs_errors);
8677 ESTAT_ADD(rx_align_errors);
8678 ESTAT_ADD(rx_xon_pause_rcvd);
8679 ESTAT_ADD(rx_xoff_pause_rcvd);
8680 ESTAT_ADD(rx_mac_ctrl_rcvd);
8681 ESTAT_ADD(rx_xoff_entered);
8682 ESTAT_ADD(rx_frame_too_long_errors);
8683 ESTAT_ADD(rx_jabbers);
8684 ESTAT_ADD(rx_undersize_packets);
8685 ESTAT_ADD(rx_in_length_errors);
8686 ESTAT_ADD(rx_out_length_errors);
8687 ESTAT_ADD(rx_64_or_less_octet_packets);
8688 ESTAT_ADD(rx_65_to_127_octet_packets);
8689 ESTAT_ADD(rx_128_to_255_octet_packets);
8690 ESTAT_ADD(rx_256_to_511_octet_packets);
8691 ESTAT_ADD(rx_512_to_1023_octet_packets);
8692 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8693 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8694 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8695 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8696 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8697
8698 ESTAT_ADD(tx_octets);
8699 ESTAT_ADD(tx_collisions);
8700 ESTAT_ADD(tx_xon_sent);
8701 ESTAT_ADD(tx_xoff_sent);
8702 ESTAT_ADD(tx_flow_control);
8703 ESTAT_ADD(tx_mac_errors);
8704 ESTAT_ADD(tx_single_collisions);
8705 ESTAT_ADD(tx_mult_collisions);
8706 ESTAT_ADD(tx_deferred);
8707 ESTAT_ADD(tx_excessive_collisions);
8708 ESTAT_ADD(tx_late_collisions);
8709 ESTAT_ADD(tx_collide_2times);
8710 ESTAT_ADD(tx_collide_3times);
8711 ESTAT_ADD(tx_collide_4times);
8712 ESTAT_ADD(tx_collide_5times);
8713 ESTAT_ADD(tx_collide_6times);
8714 ESTAT_ADD(tx_collide_7times);
8715 ESTAT_ADD(tx_collide_8times);
8716 ESTAT_ADD(tx_collide_9times);
8717 ESTAT_ADD(tx_collide_10times);
8718 ESTAT_ADD(tx_collide_11times);
8719 ESTAT_ADD(tx_collide_12times);
8720 ESTAT_ADD(tx_collide_13times);
8721 ESTAT_ADD(tx_collide_14times);
8722 ESTAT_ADD(tx_collide_15times);
8723 ESTAT_ADD(tx_ucast_packets);
8724 ESTAT_ADD(tx_mcast_packets);
8725 ESTAT_ADD(tx_bcast_packets);
8726 ESTAT_ADD(tx_carrier_sense_errors);
8727 ESTAT_ADD(tx_discards);
8728 ESTAT_ADD(tx_errors);
8729
8730 ESTAT_ADD(dma_writeq_full);
8731 ESTAT_ADD(dma_write_prioq_full);
8732 ESTAT_ADD(rxbds_empty);
8733 ESTAT_ADD(rx_discards);
8734 ESTAT_ADD(rx_errors);
8735 ESTAT_ADD(rx_threshold_hit);
8736
8737 ESTAT_ADD(dma_readq_full);
8738 ESTAT_ADD(dma_read_prioq_full);
8739 ESTAT_ADD(tx_comp_queue_full);
8740
8741 ESTAT_ADD(ring_set_send_prod_index);
8742 ESTAT_ADD(ring_status_update);
8743 ESTAT_ADD(nic_irqs);
8744 ESTAT_ADD(nic_avoided_irqs);
8745 ESTAT_ADD(nic_tx_threshold_hit);
8746
8747 return estats;
8748}
8749
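/* Standard net_device_stats are derived from the same hardware block:
 * rx/tx_packets sum the unicast, multicast and broadcast counters, the
 * error fields aggregate several MAC error statistics, and
 * rx_crc_errors comes from calc_crc_errors() above.
 */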
8750static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8751{
8752 struct tg3 *tp = netdev_priv(dev);
8753 struct net_device_stats *stats = &tp->net_stats;
8754 struct net_device_stats *old_stats = &tp->net_stats_prev;
8755 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8756
8757 if (!hw_stats)
8758 return old_stats;
8759
8760 stats->rx_packets = old_stats->rx_packets +
8761 get_stat64(&hw_stats->rx_ucast_packets) +
8762 get_stat64(&hw_stats->rx_mcast_packets) +
8763 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008764
Linus Torvalds1da177e2005-04-16 15:20:36 -07008765 stats->tx_packets = old_stats->tx_packets +
8766 get_stat64(&hw_stats->tx_ucast_packets) +
8767 get_stat64(&hw_stats->tx_mcast_packets) +
8768 get_stat64(&hw_stats->tx_bcast_packets);
8769
8770 stats->rx_bytes = old_stats->rx_bytes +
8771 get_stat64(&hw_stats->rx_octets);
8772 stats->tx_bytes = old_stats->tx_bytes +
8773 get_stat64(&hw_stats->tx_octets);
8774
8775 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008776 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008777 stats->tx_errors = old_stats->tx_errors +
8778 get_stat64(&hw_stats->tx_errors) +
8779 get_stat64(&hw_stats->tx_mac_errors) +
8780 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8781 get_stat64(&hw_stats->tx_discards);
8782
8783 stats->multicast = old_stats->multicast +
8784 get_stat64(&hw_stats->rx_mcast_packets);
8785 stats->collisions = old_stats->collisions +
8786 get_stat64(&hw_stats->tx_collisions);
8787
8788 stats->rx_length_errors = old_stats->rx_length_errors +
8789 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8790 get_stat64(&hw_stats->rx_undersize_packets);
8791
8792 stats->rx_over_errors = old_stats->rx_over_errors +
8793 get_stat64(&hw_stats->rxbds_empty);
8794 stats->rx_frame_errors = old_stats->rx_frame_errors +
8795 get_stat64(&hw_stats->rx_align_errors);
8796 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8797 get_stat64(&hw_stats->tx_discards);
8798 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8799 get_stat64(&hw_stats->tx_carrier_sense_errors);
8800
8801 stats->rx_crc_errors = old_stats->rx_crc_errors +
8802 calc_crc_errors(tp);
8803
John W. Linville4f63b872005-09-12 14:43:18 -07008804 stats->rx_missed_errors = old_stats->rx_missed_errors +
8805 get_stat64(&hw_stats->rx_discards);
8806
Linus Torvalds1da177e2005-04-16 15:20:36 -07008807 return stats;
8808}
8809
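/* Bit-reflected CRC-32 (polynomial 0xedb88320) over the buffer, with
 * the final value inverted.  The multicast filter code below uses the
 * low seven bits of the complemented result to select one of the 128
 * hash bits spread across MAC_HASH_REG_0..3.
 */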
8810static inline u32 calc_crc(unsigned char *buf, int len)
8811{
8812 u32 reg;
8813 u32 tmp;
8814 int j, k;
8815
8816 reg = 0xffffffff;
8817
8818 for (j = 0; j < len; j++) {
8819 reg ^= buf[j];
8820
8821 for (k = 0; k < 8; k++) {
8822 tmp = reg & 0x01;
8823
8824 reg >>= 1;
8825
8826 if (tmp) {
8827 reg ^= 0xedb88320;
8828 }
8829 }
8830 }
8831
8832 return ~reg;
8833}
8834
8835static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8836{
8837 /* accept or reject all multicast frames */
8838 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8839 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8840 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8841 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8842}
8843
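/* Receive filter policy: IFF_PROMISC sets RX_MODE_PROMISC, IFF_ALLMULTI
 * sets all four hash registers, an empty multicast list clears them,
 * and otherwise each multicast address is hashed with calc_crc() into
 * one of 128 bits across MAC_HASH_REG_0..3.  RX_MODE_KEEP_VLAN_TAG is
 * set only when no VLAN group is registered and ASF firmware is not
 * active.
 */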
8844static void __tg3_set_rx_mode(struct net_device *dev)
8845{
8846 struct tg3 *tp = netdev_priv(dev);
8847 u32 rx_mode;
8848
8849 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8850 RX_MODE_KEEP_VLAN_TAG);
8851
8852 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8853 * flag clear.
8854 */
8855#if TG3_VLAN_TAG_USED
8856 if (!tp->vlgrp &&
8857 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8858 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8859#else
8860	/* By definition, VLAN is always disabled in this
8861 * case.
8862 */
8863 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8864 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8865#endif
8866
8867 if (dev->flags & IFF_PROMISC) {
8868 /* Promiscuous mode. */
8869 rx_mode |= RX_MODE_PROMISC;
8870 } else if (dev->flags & IFF_ALLMULTI) {
8871 /* Accept all multicast. */
8872 tg3_set_multi (tp, 1);
8873 } else if (dev->mc_count < 1) {
8874 /* Reject all multicast. */
8875 tg3_set_multi (tp, 0);
8876 } else {
8877 /* Accept one or more multicast(s). */
8878 struct dev_mc_list *mclist;
8879 unsigned int i;
8880 u32 mc_filter[4] = { 0, };
8881 u32 regidx;
8882 u32 bit;
8883 u32 crc;
8884
8885 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8886 i++, mclist = mclist->next) {
8887
8888 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8889 bit = ~crc & 0x7f;
8890 regidx = (bit & 0x60) >> 5;
8891 bit &= 0x1f;
8892 mc_filter[regidx] |= (1 << bit);
8893 }
8894
8895 tw32(MAC_HASH_REG_0, mc_filter[0]);
8896 tw32(MAC_HASH_REG_1, mc_filter[1]);
8897 tw32(MAC_HASH_REG_2, mc_filter[2]);
8898 tw32(MAC_HASH_REG_3, mc_filter[3]);
8899 }
8900
8901 if (rx_mode != tp->rx_mode) {
8902 tp->rx_mode = rx_mode;
8903 tw32_f(MAC_RX_MODE, rx_mode);
8904 udelay(10);
8905 }
8906}
8907
8908static void tg3_set_rx_mode(struct net_device *dev)
8909{
8910 struct tg3 *tp = netdev_priv(dev);
8911
Michael Chane75f7c92006-03-20 21:33:26 -08008912 if (!netif_running(dev))
8913 return;
8914
David S. Millerf47c11e2005-06-24 20:18:35 -07008915 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008916 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008917 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008918}
8919
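/* The ethtool register dump is a fixed 32KB image.  GET_REG32_LOOP()
 * and GET_REG32_1() copy each readable register range into the buffer
 * at its native chip offset, so ranges the driver skips simply read
 * back as the zeroes left by the initial memset().
 */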
8920#define TG3_REGDUMP_LEN (32 * 1024)
8921
8922static int tg3_get_regs_len(struct net_device *dev)
8923{
8924 return TG3_REGDUMP_LEN;
8925}
8926
8927static void tg3_get_regs(struct net_device *dev,
8928 struct ethtool_regs *regs, void *_p)
8929{
8930 u32 *p = _p;
8931 struct tg3 *tp = netdev_priv(dev);
8932 u8 *orig_p = _p;
8933 int i;
8934
8935 regs->version = 0;
8936
8937 memset(p, 0, TG3_REGDUMP_LEN);
8938
Michael Chanbc1c7562006-03-20 17:48:03 -08008939 if (tp->link_config.phy_is_low_power)
8940 return;
8941
David S. Millerf47c11e2005-06-24 20:18:35 -07008942 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008943
8944#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8945#define GET_REG32_LOOP(base,len) \
8946do { p = (u32 *)(orig_p + (base)); \
8947 for (i = 0; i < len; i += 4) \
8948 __GET_REG32((base) + i); \
8949} while (0)
8950#define GET_REG32_1(reg) \
8951do { p = (u32 *)(orig_p + (reg)); \
8952 __GET_REG32((reg)); \
8953} while (0)
8954
8955 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8956 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8957 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8958 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8959 GET_REG32_1(SNDDATAC_MODE);
8960 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8961 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8962 GET_REG32_1(SNDBDC_MODE);
8963 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8964 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8965 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8966 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8967 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8968 GET_REG32_1(RCVDCC_MODE);
8969 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8970 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8971 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8972 GET_REG32_1(MBFREE_MODE);
8973 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8974 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8975 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8976 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8977 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008978 GET_REG32_1(RX_CPU_MODE);
8979 GET_REG32_1(RX_CPU_STATE);
8980 GET_REG32_1(RX_CPU_PGMCTR);
8981 GET_REG32_1(RX_CPU_HWBKPT);
8982 GET_REG32_1(TX_CPU_MODE);
8983 GET_REG32_1(TX_CPU_STATE);
8984 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008985 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8986 GET_REG32_LOOP(FTQ_RESET, 0x120);
8987 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8988 GET_REG32_1(DMAC_MODE);
8989 GET_REG32_LOOP(GRC_MODE, 0x4c);
8990 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8991 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8992
8993#undef __GET_REG32
8994#undef GET_REG32_LOOP
8995#undef GET_REG32_1
8996
David S. Millerf47c11e2005-06-24 20:18:35 -07008997 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008998}
8999
9000static int tg3_get_eeprom_len(struct net_device *dev)
9001{
9002 struct tg3 *tp = netdev_priv(dev);
9003
9004 return tp->nvram_size;
9005}
9006
9007static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08009008static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08009009static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009010
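/* NVRAM is accessed as 4-byte words (tg3_nvram_read_le() returns __le32
 * values).  The get/set_eeprom handlers therefore round unaligned head
 * and tail bytes out to whole words, and tg3_set_eeprom() merges the
 * caller's data into a word-aligned bounce buffer before programming
 * the flash.
 */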
9011static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9012{
9013 struct tg3 *tp = netdev_priv(dev);
9014 int ret;
9015 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08009016 u32 i, offset, len, b_offset, b_count;
9017 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009018
Michael Chanbc1c7562006-03-20 17:48:03 -08009019 if (tp->link_config.phy_is_low_power)
9020 return -EAGAIN;
9021
Linus Torvalds1da177e2005-04-16 15:20:36 -07009022 offset = eeprom->offset;
9023 len = eeprom->len;
9024 eeprom->len = 0;
9025
9026 eeprom->magic = TG3_EEPROM_MAGIC;
9027
9028 if (offset & 3) {
9029 /* adjustments to start on required 4 byte boundary */
9030 b_offset = offset & 3;
9031 b_count = 4 - b_offset;
9032 if (b_count > len) {
9033			/* e.g. offset=1 len=2 */
9034 b_count = len;
9035 }
Al Virob9fc7dc2007-12-17 22:59:57 -08009036 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009037 if (ret)
9038 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009039 memcpy(data, ((char*)&val) + b_offset, b_count);
9040 len -= b_count;
9041 offset += b_count;
9042 eeprom->len += b_count;
9043 }
9044
9045	/* read bytes up to the last 4 byte boundary */
9046 pd = &data[eeprom->len];
9047 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009048 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009049 if (ret) {
9050 eeprom->len += i;
9051 return ret;
9052 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009053 memcpy(pd + i, &val, 4);
9054 }
9055 eeprom->len += i;
9056
9057 if (len & 3) {
9058 /* read last bytes not ending on 4 byte boundary */
9059 pd = &data[eeprom->len];
9060 b_count = len & 3;
9061 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08009062 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009063 if (ret)
9064 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08009065 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009066 eeprom->len += b_count;
9067 }
9068 return 0;
9069}
9070
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009071static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009072
9073static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9074{
9075 struct tg3 *tp = netdev_priv(dev);
9076 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08009077 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009078 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08009079 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009080
Michael Chanbc1c7562006-03-20 17:48:03 -08009081 if (tp->link_config.phy_is_low_power)
9082 return -EAGAIN;
9083
Linus Torvalds1da177e2005-04-16 15:20:36 -07009084 if (eeprom->magic != TG3_EEPROM_MAGIC)
9085 return -EINVAL;
9086
9087 offset = eeprom->offset;
9088 len = eeprom->len;
9089
9090 if ((b_offset = (offset & 3))) {
9091 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08009092 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009093 if (ret)
9094 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009095 len += b_offset;
9096 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07009097 if (len < 4)
9098 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009099 }
9100
9101 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07009102 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009103 /* adjustments to end on required 4 byte boundary */
9104 odd_len = 1;
9105 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08009106 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009107 if (ret)
9108 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009109 }
9110
9111 buf = data;
9112 if (b_offset || odd_len) {
9113 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009114 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009115 return -ENOMEM;
9116 if (b_offset)
9117 memcpy(buf, &start, 4);
9118 if (odd_len)
9119 memcpy(buf+len-4, &end, 4);
9120 memcpy(buf + b_offset, data, eeprom->len);
9121 }
9122
9123 ret = tg3_nvram_write_block(tp, offset, len, buf);
9124
9125 if (buf != data)
9126 kfree(buf);
9127
9128 return ret;
9129}
9130
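/* When TG3_FLG3_USE_PHYLIB is set the link is owned by a phylib PHY
 * driver, so get/set_settings are delegated to phy_ethtool_gset/sset().
 * Otherwise the driver reports and updates tp->link_config directly and
 * reruns tg3_setup_phy() to apply a new configuration.
 */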
9131static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9132{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009133 struct tg3 *tp = netdev_priv(dev);
9134
9135 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9136 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9137 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009138 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009139 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009140
Linus Torvalds1da177e2005-04-16 15:20:36 -07009141 cmd->supported = (SUPPORTED_Autoneg);
9142
9143 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9144 cmd->supported |= (SUPPORTED_1000baseT_Half |
9145 SUPPORTED_1000baseT_Full);
9146
Karsten Keilef348142006-05-12 12:49:08 -07009147 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009148 cmd->supported |= (SUPPORTED_100baseT_Half |
9149 SUPPORTED_100baseT_Full |
9150 SUPPORTED_10baseT_Half |
9151 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08009152 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07009153 cmd->port = PORT_TP;
9154 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009155 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07009156 cmd->port = PORT_FIBRE;
9157 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009158
Linus Torvalds1da177e2005-04-16 15:20:36 -07009159 cmd->advertising = tp->link_config.advertising;
9160 if (netif_running(dev)) {
9161 cmd->speed = tp->link_config.active_speed;
9162 cmd->duplex = tp->link_config.active_duplex;
9163 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009164 cmd->phy_address = PHY_ADDR;
9165 cmd->transceiver = 0;
9166 cmd->autoneg = tp->link_config.autoneg;
9167 cmd->maxtxpkt = 0;
9168 cmd->maxrxpkt = 0;
9169 return 0;
9170}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009171
Linus Torvalds1da177e2005-04-16 15:20:36 -07009172static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9173{
9174 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009175
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009176 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9177 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9178 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009179 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009180 }
9181
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009182 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009183 /* These are the only valid advertisement bits allowed. */
9184 if (cmd->autoneg == AUTONEG_ENABLE &&
9185 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9186 ADVERTISED_1000baseT_Full |
9187 ADVERTISED_Autoneg |
9188 ADVERTISED_FIBRE)))
9189 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009190 /* Fiber can only do SPEED_1000. */
9191 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9192 (cmd->speed != SPEED_1000))
9193 return -EINVAL;
9194 /* Copper cannot force SPEED_1000. */
9195 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9196 (cmd->speed == SPEED_1000))
9197 return -EINVAL;
9198 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009199 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009200 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009201
David S. Millerf47c11e2005-06-24 20:18:35 -07009202 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009203
9204 tp->link_config.autoneg = cmd->autoneg;
9205 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009206 tp->link_config.advertising = (cmd->advertising |
9207 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009208 tp->link_config.speed = SPEED_INVALID;
9209 tp->link_config.duplex = DUPLEX_INVALID;
9210 } else {
9211 tp->link_config.advertising = 0;
9212 tp->link_config.speed = cmd->speed;
9213 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009214 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009215
Michael Chan24fcad62006-12-17 17:06:46 -08009216 tp->link_config.orig_speed = tp->link_config.speed;
9217 tp->link_config.orig_duplex = tp->link_config.duplex;
9218 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9219
Linus Torvalds1da177e2005-04-16 15:20:36 -07009220 if (netif_running(dev))
9221 tg3_setup_phy(tp, 1);
9222
David S. Millerf47c11e2005-06-24 20:18:35 -07009223 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009224
Linus Torvalds1da177e2005-04-16 15:20:36 -07009225 return 0;
9226}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009227
Linus Torvalds1da177e2005-04-16 15:20:36 -07009228static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9229{
9230 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009231
Linus Torvalds1da177e2005-04-16 15:20:36 -07009232 strcpy(info->driver, DRV_MODULE_NAME);
9233 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009234 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009235 strcpy(info->bus_info, pci_name(tp->pdev));
9236}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009237
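/* Wake-on-LAN supports magic packets only, and only when the chip
 * advertises TG3_FLAG_WOL_CAP and the PCI core reports that the device
 * can generate wakeup events.
 */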
Linus Torvalds1da177e2005-04-16 15:20:36 -07009238static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9239{
9240 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009241
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009242 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9243 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009244 wol->supported = WAKE_MAGIC;
9245 else
9246 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009247 wol->wolopts = 0;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08009248 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9249 device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009250 wol->wolopts = WAKE_MAGIC;
9251 memset(&wol->sopass, 0, sizeof(wol->sopass));
9252}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009253
Linus Torvalds1da177e2005-04-16 15:20:36 -07009254static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9255{
9256 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009257 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009258
Linus Torvalds1da177e2005-04-16 15:20:36 -07009259 if (wol->wolopts & ~WAKE_MAGIC)
9260 return -EINVAL;
9261 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009262 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009263 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009264
David S. Millerf47c11e2005-06-24 20:18:35 -07009265 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009266 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009267 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009268 device_set_wakeup_enable(dp, true);
9269 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009270 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009271 device_set_wakeup_enable(dp, false);
9272 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009273 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009274
Linus Torvalds1da177e2005-04-16 15:20:36 -07009275 return 0;
9276}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009277
Linus Torvalds1da177e2005-04-16 15:20:36 -07009278static u32 tg3_get_msglevel(struct net_device *dev)
9279{
9280 struct tg3 *tp = netdev_priv(dev);
9281 return tp->msg_enable;
9282}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009283
Linus Torvalds1da177e2005-04-16 15:20:36 -07009284static void tg3_set_msglevel(struct net_device *dev, u32 value)
9285{
9286 struct tg3 *tp = netdev_priv(dev);
9287 tp->msg_enable = value;
9288}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009289
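/* On chips with the second-generation TSO engine (TG3_FLG2_HW_TSO_2,
 * 5906 excluded) enabling TSO also enables IPv6 TSO, and the 5761,
 * 5785 and non-AX 5784 additionally gain ECN-aware TSO.
 */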
Linus Torvalds1da177e2005-04-16 15:20:36 -07009290static int tg3_set_tso(struct net_device *dev, u32 value)
9291{
9292 struct tg3 *tp = netdev_priv(dev);
9293
9294 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9295 if (value)
9296 return -EINVAL;
9297 return 0;
9298 }
Michael Chanb5d37722006-09-27 16:06:21 -07009299 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9300 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009301 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009302 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9305 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9306 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009307 dev->features |= NETIF_F_TSO_ECN;
9308 } else
9309 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009310 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009311 return ethtool_op_set_tso(dev, value);
9312}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009313
Linus Torvalds1da177e2005-04-16 15:20:36 -07009314static int tg3_nway_reset(struct net_device *dev)
9315{
9316 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009317 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009318
Linus Torvalds1da177e2005-04-16 15:20:36 -07009319 if (!netif_running(dev))
9320 return -EAGAIN;
9321
Michael Chanc94e3942005-09-27 12:12:42 -07009322 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9323 return -EINVAL;
9324
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009325 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9326 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9327 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009328 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009329 } else {
9330 u32 bmcr;
9331
9332 spin_lock_bh(&tp->lock);
9333 r = -EINVAL;
9334 tg3_readphy(tp, MII_BMCR, &bmcr);
9335 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9336 ((bmcr & BMCR_ANENABLE) ||
9337 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9338 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9339 BMCR_ANENABLE);
9340 r = 0;
9341 }
9342 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009343 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009344
Linus Torvalds1da177e2005-04-16 15:20:36 -07009345 return r;
9346}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009347
Linus Torvalds1da177e2005-04-16 15:20:36 -07009348static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9349{
9350 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009351
Linus Torvalds1da177e2005-04-16 15:20:36 -07009352 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9353 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009354 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9355 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9356 else
9357 ering->rx_jumbo_max_pending = 0;
9358
9359 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009360
9361 ering->rx_pending = tp->rx_pending;
9362 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009363 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9364 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9365 else
9366 ering->rx_jumbo_pending = 0;
9367
Linus Torvalds1da177e2005-04-16 15:20:36 -07009368 ering->tx_pending = tp->tx_pending;
9369}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009370
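/* New ring sizes must stay within the hardware ring limits, and the TX
 * ring must hold more descriptors than MAX_SKB_FRAGS (three times that
 * on chips with the TSO bug workaround).  Applying the change halts the
 * chip and reinitializes it via tg3_restart_hw().
 */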
Linus Torvalds1da177e2005-04-16 15:20:36 -07009371static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9372{
9373 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009374 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009375
Linus Torvalds1da177e2005-04-16 15:20:36 -07009376 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9377 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009378 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9379 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009380 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009381 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009382 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009383
Michael Chanbbe832c2005-06-24 20:20:04 -07009384 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009385 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009386 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009387 irq_sync = 1;
9388 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009389
Michael Chanbbe832c2005-06-24 20:20:04 -07009390 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009391
Linus Torvalds1da177e2005-04-16 15:20:36 -07009392 tp->rx_pending = ering->rx_pending;
9393
9394 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9395 tp->rx_pending > 63)
9396 tp->rx_pending = 63;
9397 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9398 tp->tx_pending = ering->tx_pending;
9399
9400 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009401 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009402 err = tg3_restart_hw(tp, 1);
9403 if (!err)
9404 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009405 }
9406
David S. Millerf47c11e2005-06-24 20:18:35 -07009407 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009408
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009409 if (irq_sync && !err)
9410 tg3_phy_start(tp);
9411
Michael Chanb9ec6c12006-07-25 16:37:27 -07009412 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009413}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009414
Linus Torvalds1da177e2005-04-16 15:20:36 -07009415static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9416{
9417 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009418
Linus Torvalds1da177e2005-04-16 15:20:36 -07009419 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009420
9421 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9422 epause->rx_pause = 1;
9423 else
9424 epause->rx_pause = 0;
9425
9426 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9427 epause->tx_pause = 1;
9428 else
9429 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009430}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009431
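/* Flow-control configuration: in the phylib case autonegotiated pause
 * is expressed through the ADVERTISED_Pause/Asym_Pause bits and a new
 * phy_start_aneg(), while the legacy path sets the flowctrl flags
 * directly and restarts the hardware.
 */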
Linus Torvalds1da177e2005-04-16 15:20:36 -07009432static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9433{
9434 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009435 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009436
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009437 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9438 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9439 return -EAGAIN;
9440
9441 if (epause->autoneg) {
9442 u32 newadv;
9443 struct phy_device *phydev;
9444
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009445 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009446
9447 if (epause->rx_pause) {
9448 if (epause->tx_pause)
9449 newadv = ADVERTISED_Pause;
9450 else
9451 newadv = ADVERTISED_Pause |
9452 ADVERTISED_Asym_Pause;
9453 } else if (epause->tx_pause) {
9454 newadv = ADVERTISED_Asym_Pause;
9455 } else
9456 newadv = 0;
9457
9458 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9459 u32 oldadv = phydev->advertising &
9460 (ADVERTISED_Pause |
9461 ADVERTISED_Asym_Pause);
9462 if (oldadv != newadv) {
9463 phydev->advertising &=
9464 ~(ADVERTISED_Pause |
9465 ADVERTISED_Asym_Pause);
9466 phydev->advertising |= newadv;
9467 err = phy_start_aneg(phydev);
9468 }
9469 } else {
9470 tp->link_config.advertising &=
9471 ~(ADVERTISED_Pause |
9472 ADVERTISED_Asym_Pause);
9473 tp->link_config.advertising |= newadv;
9474 }
9475 } else {
9476 if (epause->rx_pause)
9477 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9478 else
9479 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9480
9481 if (epause->tx_pause)
9482 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9483 else
9484 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9485
9486 if (netif_running(dev))
9487 tg3_setup_flow_control(tp, 0, 0);
9488 }
9489 } else {
9490 int irq_sync = 0;
9491
9492 if (netif_running(dev)) {
9493 tg3_netif_stop(tp);
9494 irq_sync = 1;
9495 }
9496
9497 tg3_full_lock(tp, irq_sync);
9498
9499 if (epause->autoneg)
9500 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9501 else
9502 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9503 if (epause->rx_pause)
9504 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9505 else
9506 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9507 if (epause->tx_pause)
9508 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9509 else
9510 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9511
9512 if (netif_running(dev)) {
9513 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9514 err = tg3_restart_hw(tp, 1);
9515 if (!err)
9516 tg3_netif_start(tp);
9517 }
9518
9519 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009520 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009521
Michael Chanb9ec6c12006-07-25 16:37:27 -07009522 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009523}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009524
Linus Torvalds1da177e2005-04-16 15:20:36 -07009525static u32 tg3_get_rx_csum(struct net_device *dev)
9526{
9527 struct tg3 *tp = netdev_priv(dev);
9528 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9529}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009530
Linus Torvalds1da177e2005-04-16 15:20:36 -07009531static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9532{
9533 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009534
Linus Torvalds1da177e2005-04-16 15:20:36 -07009535 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9536 if (data != 0)
9537 return -EINVAL;
9538 return 0;
9539 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009540
David S. Millerf47c11e2005-06-24 20:18:35 -07009541 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009542 if (data)
9543 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9544 else
9545 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009546 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009547
Linus Torvalds1da177e2005-04-16 15:20:36 -07009548 return 0;
9549}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009550
Linus Torvalds1da177e2005-04-16 15:20:36 -07009551static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9552{
9553 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009554
Linus Torvalds1da177e2005-04-16 15:20:36 -07009555 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9556 if (data != 0)
9557 return -EINVAL;
9558 return 0;
9559 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009560
Michael Chanaf36e6b2006-03-23 01:28:06 -08009561 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009562 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009563 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009564 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9565 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009566 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009567 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009568 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009569
9570 return 0;
9571}
9572
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009573static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009574{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009575 switch (sset) {
9576 case ETH_SS_TEST:
9577 return TG3_NUM_TEST;
9578 case ETH_SS_STATS:
9579 return TG3_NUM_STATS;
9580 default:
9581 return -EOPNOTSUPP;
9582 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009583}
9584
Linus Torvalds1da177e2005-04-16 15:20:36 -07009585static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9586{
9587 switch (stringset) {
9588 case ETH_SS_STATS:
9589 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9590 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009591 case ETH_SS_TEST:
9592 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9593 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009594 default:
9595 WARN_ON(1); /* we need a WARN() */
9596 break;
9597 }
9598}
9599
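/* ethtool port identification: the LEDs are blinked by overriding
 * MAC_LED_CTRL every 500ms, for roughly the requested number of seconds
 * (or about UINT_MAX/2 seconds when 0 is passed).
 */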
Michael Chan4009a932005-09-05 17:52:54 -07009600static int tg3_phys_id(struct net_device *dev, u32 data)
9601{
9602 struct tg3 *tp = netdev_priv(dev);
9603 int i;
9604
9605 if (!netif_running(tp->dev))
9606 return -EAGAIN;
9607
9608 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009609 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009610
9611 for (i = 0; i < (data * 2); i++) {
9612 if ((i % 2) == 0)
9613 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9614 LED_CTRL_1000MBPS_ON |
9615 LED_CTRL_100MBPS_ON |
9616 LED_CTRL_10MBPS_ON |
9617 LED_CTRL_TRAFFIC_OVERRIDE |
9618 LED_CTRL_TRAFFIC_BLINK |
9619 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009620
Michael Chan4009a932005-09-05 17:52:54 -07009621 else
9622 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9623 LED_CTRL_TRAFFIC_OVERRIDE);
9624
9625 if (msleep_interruptible(500))
9626 break;
9627 }
9628 tw32(MAC_LED_CTRL, tp->led_ctrl);
9629 return 0;
9630}
9631
Linus Torvalds1da177e2005-04-16 15:20:36 -07009632static void tg3_get_ethtool_stats (struct net_device *dev,
9633 struct ethtool_stats *estats, u64 *tmp_stats)
9634{
9635 struct tg3 *tp = netdev_priv(dev);
9636 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9637}
9638
Michael Chan566f86a2005-05-29 14:56:58 -07009639#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009640#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9641#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9642#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009643#define NVRAM_SELFBOOT_HW_SIZE 0x20
9644#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009645
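/* NVRAM self-test: legacy images are verified against the CRC-32
 * bootstrap checksum at offset 0x10 and the manufacturing-block
 * checksum at 0xfc; self-boot format-1 images must have a zero 8-bit
 * byte sum (skipping the MBA word on revision 2); hardware self-boot
 * images are checked against their embedded per-byte parity bits.
 */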
9646static int tg3_test_nvram(struct tg3 *tp)
9647{
Al Virob9fc7dc2007-12-17 22:59:57 -08009648 u32 csum, magic;
9649 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009650 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009651
Michael Chan18201802006-03-20 22:29:15 -08009652 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009653 return -EIO;
9654
Michael Chan1b277772006-03-20 22:27:48 -08009655 if (magic == TG3_EEPROM_MAGIC)
9656 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009657 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009658 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9659 TG3_EEPROM_SB_FORMAT_1) {
9660 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9661 case TG3_EEPROM_SB_REVISION_0:
9662 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9663 break;
9664 case TG3_EEPROM_SB_REVISION_2:
9665 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9666 break;
9667 case TG3_EEPROM_SB_REVISION_3:
9668 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9669 break;
9670 default:
9671 return 0;
9672 }
9673 } else
Michael Chan1b277772006-03-20 22:27:48 -08009674 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009675 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9676 size = NVRAM_SELFBOOT_HW_SIZE;
9677 else
Michael Chan1b277772006-03-20 22:27:48 -08009678 return -EIO;
9679
9680 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009681 if (buf == NULL)
9682 return -ENOMEM;
9683
Michael Chan1b277772006-03-20 22:27:48 -08009684 err = -EIO;
9685 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009686 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009687 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009688 }
Michael Chan1b277772006-03-20 22:27:48 -08009689 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009690 goto out;
9691
Michael Chan1b277772006-03-20 22:27:48 -08009692 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009693 magic = swab32(le32_to_cpu(buf[0]));
9694 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009695 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009696 u8 *buf8 = (u8 *) buf, csum8 = 0;
9697
Al Virob9fc7dc2007-12-17 22:59:57 -08009698 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009699 TG3_EEPROM_SB_REVISION_2) {
9700 /* For rev 2, the csum doesn't include the MBA. */
9701 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9702 csum8 += buf8[i];
9703 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9704 csum8 += buf8[i];
9705 } else {
9706 for (i = 0; i < size; i++)
9707 csum8 += buf8[i];
9708 }
Michael Chan1b277772006-03-20 22:27:48 -08009709
Adrian Bunkad96b482006-04-05 22:21:04 -07009710 if (csum8 == 0) {
9711 err = 0;
9712 goto out;
9713 }
9714
9715 err = -EIO;
9716 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009717 }
Michael Chan566f86a2005-05-29 14:56:58 -07009718
Al Virob9fc7dc2007-12-17 22:59:57 -08009719 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009720 TG3_EEPROM_MAGIC_HW) {
9721 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9722 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9723 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009724
9725 /* Separate the parity bits and the data bytes. */
9726 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9727 if ((i == 0) || (i == 8)) {
9728 int l;
9729 u8 msk;
9730
9731 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9732 parity[k++] = buf8[i] & msk;
9733 i++;
9734 }
9735 else if (i == 16) {
9736 int l;
9737 u8 msk;
9738
9739 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9740 parity[k++] = buf8[i] & msk;
9741 i++;
9742
9743 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9744 parity[k++] = buf8[i] & msk;
9745 i++;
9746 }
9747 data[j++] = buf8[i];
9748 }
9749
9750 err = -EIO;
9751 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9752 u8 hw8 = hweight8(data[i]);
9753
9754 if ((hw8 & 0x1) && parity[i])
9755 goto out;
9756 else if (!(hw8 & 0x1) && !parity[i])
9757 goto out;
9758 }
9759 err = 0;
9760 goto out;
9761 }
9762
Michael Chan566f86a2005-05-29 14:56:58 -07009763 /* Bootstrap checksum at offset 0x10 */
9764 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009765 if(csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009766 goto out;
9767
9768 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9769 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009770 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009771 goto out;
9772
9773 err = 0;
9774
9775out:
9776 kfree(buf);
9777 return err;
9778}
9779
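/* The link test simply polls netif_carrier_ok() once a second, for up
 * to 2 seconds on SerDes ports and 6 seconds on copper.
 */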
Michael Chanca430072005-05-29 14:57:23 -07009780#define TG3_SERDES_TIMEOUT_SEC 2
9781#define TG3_COPPER_TIMEOUT_SEC 6
9782
9783static int tg3_test_link(struct tg3 *tp)
9784{
9785 int i, max;
9786
9787 if (!netif_running(tp->dev))
9788 return -ENODEV;
9789
Michael Chan4c987482005-09-05 17:52:38 -07009790 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009791 max = TG3_SERDES_TIMEOUT_SEC;
9792 else
9793 max = TG3_COPPER_TIMEOUT_SEC;
9794
9795 for (i = 0; i < max; i++) {
9796 if (netif_carrier_ok(tp->dev))
9797 return 0;
9798
9799 if (msleep_interruptible(1000))
9800 break;
9801 }
9802
9803 return -EIO;
9804}
9805
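/* Each reg_tbl entry below lists a register offset, TG3_FL_*
 * applicability flags (5705-class, non-5705, non-5788, non-5750), a
 * mask of read-only bits and a mask of writable bits that the register
 * test exercises.
 */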
Michael Chana71116d2005-05-29 14:58:11 -07009806/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009807static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009808{
Michael Chanb16250e2006-09-27 16:10:14 -07009809 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009810 u32 offset, read_mask, write_mask, val, save_val, read_val;
9811 static struct {
9812 u16 offset;
9813 u16 flags;
9814#define TG3_FL_5705 0x1
9815#define TG3_FL_NOT_5705 0x2
9816#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009817#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009818 u32 read_mask;
9819 u32 write_mask;
9820 } reg_tbl[] = {
9821 /* MAC Control Registers */
9822 { MAC_MODE, TG3_FL_NOT_5705,
9823 0x00000000, 0x00ef6f8c },
9824 { MAC_MODE, TG3_FL_5705,
9825 0x00000000, 0x01ef6b8c },
9826 { MAC_STATUS, TG3_FL_NOT_5705,
9827 0x03800107, 0x00000000 },
9828 { MAC_STATUS, TG3_FL_5705,
9829 0x03800100, 0x00000000 },
9830 { MAC_ADDR_0_HIGH, 0x0000,
9831 0x00000000, 0x0000ffff },
9832 { MAC_ADDR_0_LOW, 0x0000,
9833 0x00000000, 0xffffffff },
9834 { MAC_RX_MTU_SIZE, 0x0000,
9835 0x00000000, 0x0000ffff },
9836 { MAC_TX_MODE, 0x0000,
9837 0x00000000, 0x00000070 },
9838 { MAC_TX_LENGTHS, 0x0000,
9839 0x00000000, 0x00003fff },
9840 { MAC_RX_MODE, TG3_FL_NOT_5705,
9841 0x00000000, 0x000007fc },
9842 { MAC_RX_MODE, TG3_FL_5705,
9843 0x00000000, 0x000007dc },
9844 { MAC_HASH_REG_0, 0x0000,
9845 0x00000000, 0xffffffff },
9846 { MAC_HASH_REG_1, 0x0000,
9847 0x00000000, 0xffffffff },
9848 { MAC_HASH_REG_2, 0x0000,
9849 0x00000000, 0xffffffff },
9850 { MAC_HASH_REG_3, 0x0000,
9851 0x00000000, 0xffffffff },
9852
9853 /* Receive Data and Receive BD Initiator Control Registers. */
9854 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9855 0x00000000, 0xffffffff },
9856 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9857 0x00000000, 0xffffffff },
9858 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9859 0x00000000, 0x00000003 },
9860 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9861 0x00000000, 0xffffffff },
9862 { RCVDBDI_STD_BD+0, 0x0000,
9863 0x00000000, 0xffffffff },
9864 { RCVDBDI_STD_BD+4, 0x0000,
9865 0x00000000, 0xffffffff },
9866 { RCVDBDI_STD_BD+8, 0x0000,
9867 0x00000000, 0xffff0002 },
9868 { RCVDBDI_STD_BD+0xc, 0x0000,
9869 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009870
Michael Chana71116d2005-05-29 14:58:11 -07009871 /* Receive BD Initiator Control Registers. */
9872 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9873 0x00000000, 0xffffffff },
9874 { RCVBDI_STD_THRESH, TG3_FL_5705,
9875 0x00000000, 0x000003ff },
9876 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9877 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009878
Michael Chana71116d2005-05-29 14:58:11 -07009879 /* Host Coalescing Control Registers. */
9880 { HOSTCC_MODE, TG3_FL_NOT_5705,
9881 0x00000000, 0x00000004 },
9882 { HOSTCC_MODE, TG3_FL_5705,
9883 0x00000000, 0x000000f6 },
9884 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9885 0x00000000, 0xffffffff },
9886 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9887 0x00000000, 0x000003ff },
9888 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9889 0x00000000, 0xffffffff },
9890 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9891 0x00000000, 0x000003ff },
9892 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9893 0x00000000, 0xffffffff },
9894 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9895 0x00000000, 0x000000ff },
9896 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9897 0x00000000, 0xffffffff },
9898 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9899 0x00000000, 0x000000ff },
9900 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9901 0x00000000, 0xffffffff },
9902 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9903 0x00000000, 0xffffffff },
9904 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9905 0x00000000, 0xffffffff },
9906 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9907 0x00000000, 0x000000ff },
9908 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9909 0x00000000, 0xffffffff },
9910 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9911 0x00000000, 0x000000ff },
9912 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9913 0x00000000, 0xffffffff },
9914 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9915 0x00000000, 0xffffffff },
9916 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9917 0x00000000, 0xffffffff },
9918 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9919 0x00000000, 0xffffffff },
9920 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9921 0x00000000, 0xffffffff },
9922 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9923 0xffffffff, 0x00000000 },
9924 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9925 0xffffffff, 0x00000000 },
9926
9927 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009928 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009929 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009930 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009931 0x00000000, 0x007fffff },
9932 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9933 0x00000000, 0x0000003f },
9934 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9935 0x00000000, 0x000001ff },
9936 { BUFMGR_MB_HIGH_WATER, 0x0000,
9937 0x00000000, 0x000001ff },
9938 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9939 0xffffffff, 0x00000000 },
9940 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9941 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009942
Michael Chana71116d2005-05-29 14:58:11 -07009943 /* Mailbox Registers */
9944 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9945 0x00000000, 0x000001ff },
9946 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9947 0x00000000, 0x000001ff },
9948 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9949 0x00000000, 0x000007ff },
9950 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9951 0x00000000, 0x000001ff },
9952
9953 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9954 };
9955
Michael Chanb16250e2006-09-27 16:10:14 -07009956 is_5705 = is_5750 = 0;
9957 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009958 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009959 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9960 is_5750 = 1;
9961 }
Michael Chana71116d2005-05-29 14:58:11 -07009962
9963 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9964 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9965 continue;
9966
9967 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9968 continue;
9969
9970 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9971 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9972 continue;
9973
Michael Chanb16250e2006-09-27 16:10:14 -07009974 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9975 continue;
9976
Michael Chana71116d2005-05-29 14:58:11 -07009977 offset = (u32) reg_tbl[i].offset;
9978 read_mask = reg_tbl[i].read_mask;
9979 write_mask = reg_tbl[i].write_mask;
9980
9981 /* Save the original register content */
9982 save_val = tr32(offset);
9983
9984 /* Determine the read-only value. */
9985 read_val = save_val & read_mask;
9986
9987 /* Write zero to the register, then make sure the read-only bits
9988 * are not changed and the read/write bits are all zeros.
9989 */
9990 tw32(offset, 0);
9991
9992 val = tr32(offset);
9993
9994 /* Test the read-only and read/write bits. */
9995 if (((val & read_mask) != read_val) || (val & write_mask))
9996 goto out;
9997
9998 /* Write ones to all the bits defined by RdMask and WrMask, then
9999 * make sure the read-only bits are not changed and the
10000 * read/write bits are all ones.
10001 */
10002 tw32(offset, read_mask | write_mask);
10003
10004 val = tr32(offset);
10005
10006 /* Test the read-only bits. */
10007 if ((val & read_mask) != read_val)
10008 goto out;
10009
10010 /* Test the read/write bits. */
10011 if ((val & write_mask) != write_mask)
10012 goto out;
10013
10014 tw32(offset, save_val);
10015 }
10016
10017 return 0;
10018
10019out:
Michael Chan9f88f292006-12-07 00:22:54 -080010020 if (netif_msg_hw(tp))
10021 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10022 offset);
Michael Chana71116d2005-05-29 14:58:11 -070010023 tw32(offset, save_val);
10024 return -EIO;
10025}
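/*
 * Worked example of the mask scheme above: MAC_RX_MTU_SIZE is listed
 * with read_mask 0x00000000 and write_mask 0x0000ffff, so the test
 * writes 0 and expects the low 16 bits to read back as 0, then writes
 * read_mask | write_mask (0xffff) and expects them to read back as all
 * ones, restoring the saved value afterwards.  Registers flagged with
 * TG3_FL_5705/TG3_FL_NOT_5705, TG3_FL_NOT_5788 or TG3_FL_NOT_5750 are
 * skipped on chips where the layout differs.
 */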
10026
Michael Chan7942e1d2005-05-29 14:58:36 -070010027static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10028{
Arjan van de Venf71e1302006-03-03 21:33:57 -050010029 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -070010030 int i;
10031 u32 j;
10032
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +020010033 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -070010034 for (j = 0; j < len; j += 4) {
10035 u32 val;
10036
10037 tg3_write_mem(tp, offset + j, test_pattern[i]);
10038 tg3_read_mem(tp, offset + j, &val);
10039 if (val != test_pattern[i])
10040 return -EIO;
10041 }
10042 }
10043 return 0;
10044}
10045
10046static int tg3_test_memory(struct tg3 *tp)
10047{
10048 static struct mem_entry {
10049 u32 offset;
10050 u32 len;
10051 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -080010052 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -070010053 { 0x00002000, 0x1c000},
10054 { 0xffffffff, 0x00000}
10055 }, mem_tbl_5705[] = {
10056 { 0x00000100, 0x0000c},
10057 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -070010058 { 0x00004000, 0x00800},
10059 { 0x00006000, 0x01000},
10060 { 0x00008000, 0x02000},
10061 { 0x00010000, 0x0e000},
10062 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -080010063 }, mem_tbl_5755[] = {
10064 { 0x00000200, 0x00008},
10065 { 0x00004000, 0x00800},
10066 { 0x00006000, 0x00800},
10067 { 0x00008000, 0x02000},
10068 { 0x00010000, 0x0c000},
10069 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -070010070 }, mem_tbl_5906[] = {
10071 { 0x00000200, 0x00008},
10072 { 0x00004000, 0x00400},
10073 { 0x00006000, 0x00400},
10074 { 0x00008000, 0x01000},
10075 { 0x00010000, 0x01000},
10076 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -070010077 };
10078 struct mem_entry *mem_tbl;
10079 int err = 0;
10080 int i;
10081
Michael Chan79f4d132006-03-20 22:28:57 -080010082 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080010083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070010084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070010085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -080010088 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -070010089 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10090 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -080010091 else
10092 mem_tbl = mem_tbl_5705;
10093 } else
Michael Chan7942e1d2005-05-29 14:58:36 -070010094 mem_tbl = mem_tbl_570x;
10095
10096 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10097 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10098 mem_tbl[i].len)) != 0)
10099 break;
10100 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010101
Michael Chan7942e1d2005-05-29 14:58:36 -070010102 return err;
10103}
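/*
 * The memory test picks an SRAM map for the chip generation (570x,
 * 5705, 5755-class or 5906) and, for each region, tg3_do_mem_test()
 * writes and reads back three patterns word by word: all zeros, all
 * ones and 0xaa55a55a.
 */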
10104
Michael Chan9f40dea2005-09-05 17:53:06 -070010105#define TG3_MAC_LOOPBACK 0
10106#define TG3_PHY_LOOPBACK 1
10107
10108static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -070010109{
Michael Chan9f40dea2005-09-05 17:53:06 -070010110 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -070010111 u32 desc_idx;
10112 struct sk_buff *skb, *rx_skb;
10113 u8 *tx_data;
10114 dma_addr_t map;
10115 int num_pkts, tx_len, rx_len, i, err;
10116 struct tg3_rx_buffer_desc *desc;
10117
Michael Chan9f40dea2005-09-05 17:53:06 -070010118 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -070010119 /* HW errata - mac loopback fails in some cases on 5780.
10120 * Normal traffic and PHY loopback are not affected by
10121 * errata.
10122 */
10123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10124 return 0;
10125
Michael Chan9f40dea2005-09-05 17:53:06 -070010126 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010127 MAC_MODE_PORT_INT_LPBACK;
10128 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10129 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -070010130 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10131 mac_mode |= MAC_MODE_PORT_MODE_MII;
10132 else
10133 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -070010134 tw32(MAC_MODE, mac_mode);
10135 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -070010136 u32 val;
10137
Michael Chanb16250e2006-09-27 16:10:14 -070010138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10139 u32 phytest;
10140
10141 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10142 u32 phy;
10143
10144 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10145 phytest | MII_TG3_EPHY_SHADOW_EN);
10146 if (!tg3_readphy(tp, 0x1b, &phy))
10147 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -070010148 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10149 }
Michael Chan5d64ad32006-12-07 00:19:40 -080010150 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10151 } else
10152 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -070010153
Matt Carlson9ef8ca92007-07-11 19:48:29 -070010154 tg3_phy_toggle_automdix(tp, 0);
10155
Michael Chan3f7045c2006-09-27 16:02:29 -070010156 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -070010157 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -080010158
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010159 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -080010160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010161 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010162 mac_mode |= MAC_MODE_PORT_MODE_MII;
10163 } else
10164 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010165
Michael Chanc94e3942005-09-27 12:12:42 -070010166 /* reset to prevent losing 1st rx packet intermittently */
10167 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10168 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10169 udelay(10);
10170 tw32_f(MAC_RX_MODE, tp->rx_mode);
10171 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10173 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10174 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10175 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10176 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010177 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10178 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10179 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010180 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010181 }
10182 else
10183 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010184
10185 err = -EIO;
10186
Michael Chanc76949a2005-05-29 14:58:59 -070010187 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010188 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010189 if (!skb)
10190 return -ENOMEM;
10191
Michael Chanc76949a2005-05-29 14:58:59 -070010192 tx_data = skb_put(skb, tx_len);
10193 memcpy(tx_data, tp->dev->dev_addr, 6);
10194 memset(tx_data + 6, 0x0, 8);
10195
10196 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10197
10198 for (i = 14; i < tx_len; i++)
10199 tx_data[i] = (u8) (i & 0xff);
10200
10201 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10202
10203 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10204 HOSTCC_MODE_NOW);
10205
10206 udelay(10);
10207
10208 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10209
Michael Chanc76949a2005-05-29 14:58:59 -070010210 num_pkts = 0;
10211
Michael Chan9f40dea2005-09-05 17:53:06 -070010212 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010213
Michael Chan9f40dea2005-09-05 17:53:06 -070010214 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010215 num_pkts++;
10216
Michael Chan9f40dea2005-09-05 17:53:06 -070010217 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10218 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010219 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010220
10221 udelay(10);
10222
Michael Chan3f7045c2006-09-27 16:02:29 -070010223 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10224 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010225 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10226 HOSTCC_MODE_NOW);
10227
10228 udelay(10);
10229
10230 tx_idx = tp->hw_status->idx[0].tx_consumer;
10231 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010232 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010233 (rx_idx == (rx_start_idx + num_pkts)))
10234 break;
10235 }
10236
10237 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10238 dev_kfree_skb(skb);
10239
Michael Chan9f40dea2005-09-05 17:53:06 -070010240 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010241 goto out;
10242
10243 if (rx_idx != rx_start_idx + num_pkts)
10244 goto out;
10245
10246 desc = &tp->rx_rcb[rx_start_idx];
10247 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10248 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10249 if (opaque_key != RXD_OPAQUE_RING_STD)
10250 goto out;
10251
10252 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10253 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10254 goto out;
10255
10256 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10257 if (rx_len != tx_len)
10258 goto out;
10259
10260 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10261
10262 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10263 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10264
10265 for (i = 14; i < tx_len; i++) {
10266 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10267 goto out;
10268 }
10269 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010270
Michael Chanc76949a2005-05-29 14:58:59 -070010271 /* tg3_free_rings will unmap and free the rx_skb */
10272out:
10273 return err;
10274}
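/*
 * Loopback test flow: a 1514-byte frame addressed to the device's own
 * MAC address and filled with a bytewise ramp (i & 0xff) is queued on
 * the send ring, the host-coalescing block is kicked, and the status
 * block is polled (up to ~250 usec) until the TX consumer and RX
 * producer indices advance.  The received buffer must come from the
 * standard ring, have the same length, and match the transmitted
 * payload byte for byte.
 */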
10275
Michael Chan9f40dea2005-09-05 17:53:06 -070010276#define TG3_MAC_LOOPBACK_FAILED 1
10277#define TG3_PHY_LOOPBACK_FAILED 2
10278#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10279 TG3_PHY_LOOPBACK_FAILED)
10280
10281static int tg3_test_loopback(struct tg3 *tp)
10282{
10283 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010284 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010285
10286 if (!netif_running(tp->dev))
10287 return TG3_LOOPBACK_FAILED;
10288
Michael Chanb9ec6c12006-07-25 16:37:27 -070010289 err = tg3_reset_hw(tp, 1);
10290 if (err)
10291 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010292
Matt Carlson6833c042008-11-21 17:18:59 -080010293 /* Turn off gphy autopowerdown. */
10294 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10295 tg3_phy_toggle_apd(tp, false);
10296
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010297 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010298 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10299 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010300 int i;
10301 u32 status;
10302
10303 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10304
10305 /* Wait for up to 40 microseconds to acquire lock. */
10306 for (i = 0; i < 4; i++) {
10307 status = tr32(TG3_CPMU_MUTEX_GNT);
10308 if (status == CPMU_MUTEX_GNT_DRIVER)
10309 break;
10310 udelay(10);
10311 }
10312
10313 if (status != CPMU_MUTEX_GNT_DRIVER)
10314 return TG3_LOOPBACK_FAILED;
10315
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010316 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010317 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010318 tw32(TG3_CPMU_CTRL,
10319 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10320 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010321 }
10322
Michael Chan9f40dea2005-09-05 17:53:06 -070010323 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10324 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010325
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010329 tw32(TG3_CPMU_CTRL, cpmuctrl);
10330
10331 /* Release the mutex */
10332 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10333 }
10334
Matt Carlsondd477002008-05-25 23:45:58 -070010335 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10336 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010337 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10338 err |= TG3_PHY_LOOPBACK_FAILED;
10339 }
10340
Matt Carlson6833c042008-11-21 17:18:59 -080010341 /* Re-enable gphy autopowerdown. */
10342 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10343 tg3_phy_toggle_apd(tp, true);
10344
Michael Chan9f40dea2005-09-05 17:53:06 -070010345 return err;
10346}
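/*
 * On 5784/5761/5785 the CPMU mutex is taken and link-speed/link-aware
 * power management is disabled for the duration of the MAC loopback
 * run, then restored.  GPHY auto-powerdown is likewise toggled off and
 * back on around the tests.  PHY loopback is skipped for serdes
 * devices and when the PHY is driven through phylib.
 */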
10347
Michael Chan4cafd3f2005-05-29 14:56:34 -070010348static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10349 u64 *data)
10350{
Michael Chan566f86a2005-05-29 14:56:58 -070010351 struct tg3 *tp = netdev_priv(dev);
10352
Michael Chanbc1c7562006-03-20 17:48:03 -080010353 if (tp->link_config.phy_is_low_power)
10354 tg3_set_power_state(tp, PCI_D0);
10355
Michael Chan566f86a2005-05-29 14:56:58 -070010356 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10357
10358 if (tg3_test_nvram(tp) != 0) {
10359 etest->flags |= ETH_TEST_FL_FAILED;
10360 data[0] = 1;
10361 }
Michael Chanca430072005-05-29 14:57:23 -070010362 if (tg3_test_link(tp) != 0) {
10363 etest->flags |= ETH_TEST_FL_FAILED;
10364 data[1] = 1;
10365 }
Michael Chana71116d2005-05-29 14:58:11 -070010366 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010367 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010368
Michael Chanbbe832c2005-06-24 20:20:04 -070010369 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010370 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010371 tg3_netif_stop(tp);
10372 irq_sync = 1;
10373 }
10374
10375 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010376
10377 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010378 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010379 tg3_halt_cpu(tp, RX_CPU_BASE);
10380 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10381 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010382 if (!err)
10383 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010384
Michael Chand9ab5ad2006-03-20 22:27:35 -080010385 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10386 tg3_phy_reset(tp);
10387
Michael Chana71116d2005-05-29 14:58:11 -070010388 if (tg3_test_registers(tp) != 0) {
10389 etest->flags |= ETH_TEST_FL_FAILED;
10390 data[2] = 1;
10391 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010392 if (tg3_test_memory(tp) != 0) {
10393 etest->flags |= ETH_TEST_FL_FAILED;
10394 data[3] = 1;
10395 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010396 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010397 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010398
David S. Millerf47c11e2005-06-24 20:18:35 -070010399 tg3_full_unlock(tp);
10400
Michael Chand4bc3922005-05-29 14:59:20 -070010401 if (tg3_test_interrupt(tp) != 0) {
10402 etest->flags |= ETH_TEST_FL_FAILED;
10403 data[5] = 1;
10404 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010405
10406 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010407
Michael Chana71116d2005-05-29 14:58:11 -070010408 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10409 if (netif_running(dev)) {
10410 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010411 err2 = tg3_restart_hw(tp, 1);
10412 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010413 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010414 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010415
10416 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010417
10418 if (irq_sync && !err2)
10419 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010420 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010421 if (tp->link_config.phy_is_low_power)
10422 tg3_set_power_state(tp, PCI_D3hot);
10423
Michael Chan4cafd3f2005-05-29 14:56:34 -070010424}
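/*
 * ethtool self-test result layout: data[0] NVRAM, data[1] link,
 * data[2] registers, data[3] memory, data[4] loopback (bit 0 = MAC
 * loopback failed, bit 1 = PHY loopback failed), data[5] interrupt.
 * The register/memory/loopback/interrupt tests only run for an
 * offline test, which from user space is typically requested with
 * something like "ethtool -t eth0 offline".
 */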
10425
Linus Torvalds1da177e2005-04-16 15:20:36 -070010426static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10427{
10428 struct mii_ioctl_data *data = if_mii(ifr);
10429 struct tg3 *tp = netdev_priv(dev);
10430 int err;
10431
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010432 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10433 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10434 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010435 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010436 }
10437
Linus Torvalds1da177e2005-04-16 15:20:36 -070010438	switch (cmd) {
10439 case SIOCGMIIPHY:
10440 data->phy_id = PHY_ADDR;
10441
10442 /* fallthru */
10443 case SIOCGMIIREG: {
10444 u32 mii_regval;
10445
10446 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10447 break; /* We have no PHY */
10448
Michael Chanbc1c7562006-03-20 17:48:03 -080010449 if (tp->link_config.phy_is_low_power)
10450 return -EAGAIN;
10451
David S. Millerf47c11e2005-06-24 20:18:35 -070010452 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010453 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010454 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010455
10456 data->val_out = mii_regval;
10457
10458 return err;
10459 }
10460
10461 case SIOCSMIIREG:
10462 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10463 break; /* We have no PHY */
10464
10465 if (!capable(CAP_NET_ADMIN))
10466 return -EPERM;
10467
Michael Chanbc1c7562006-03-20 17:48:03 -080010468 if (tp->link_config.phy_is_low_power)
10469 return -EAGAIN;
10470
David S. Millerf47c11e2005-06-24 20:18:35 -070010471 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010472 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010473 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010474
10475 return err;
10476
10477 default:
10478 /* do nothing */
10479 break;
10480 }
10481 return -EOPNOTSUPP;
10482}
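/*
 * MII ioctl handling: when phylib owns the PHY the request is handed
 * to phy_mii_ioctl(); otherwise SIOCGMIIREG/SIOCSMIIREG access the PHY
 * registers directly under tp->lock.  Serdes devices end up returning
 * -EOPNOTSUPP (there is no PHY to talk to) and a powered-down PHY
 * returns -EAGAIN.
 */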
10483
10484#if TG3_VLAN_TAG_USED
10485static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10486{
10487 struct tg3 *tp = netdev_priv(dev);
10488
Michael Chan29315e82006-06-29 20:12:30 -070010489 if (netif_running(dev))
10490 tg3_netif_stop(tp);
10491
David S. Millerf47c11e2005-06-24 20:18:35 -070010492 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010493
10494 tp->vlgrp = grp;
10495
10496 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10497 __tg3_set_rx_mode(dev);
10498
Michael Chan29315e82006-06-29 20:12:30 -070010499 if (netif_running(dev))
10500 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010501
10502 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010503}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010504#endif
10505
David S. Miller15f98502005-05-18 22:49:26 -070010506static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10507{
10508 struct tg3 *tp = netdev_priv(dev);
10509
10510 memcpy(ec, &tp->coal, sizeof(*ec));
10511 return 0;
10512}
10513
Michael Chand244c892005-07-05 14:42:33 -070010514static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10515{
10516 struct tg3 *tp = netdev_priv(dev);
10517 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10518 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10519
10520 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10521 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10522 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10523 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10524 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10525 }
10526
10527 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10528 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10529 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10530 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10531 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10532 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10533 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10534 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10535 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10536 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10537 return -EINVAL;
10538
10539 /* No rx interrupts will be generated if both are zero */
10540 if ((ec->rx_coalesce_usecs == 0) &&
10541 (ec->rx_max_coalesced_frames == 0))
10542 return -EINVAL;
10543
10544 /* No tx interrupts will be generated if both are zero */
10545 if ((ec->tx_coalesce_usecs == 0) &&
10546 (ec->tx_max_coalesced_frames == 0))
10547 return -EINVAL;
10548
10549 /* Only copy relevant parameters, ignore all others. */
10550 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10551 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10552 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10553 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10554 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10555 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10556 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10557 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10558 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10559
10560 if (netif_running(dev)) {
10561 tg3_full_lock(tp, 0);
10562 __tg3_set_coalesce(tp, &tp->coal);
10563 tg3_full_unlock(tp);
10564 }
10565 return 0;
10566}
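/*
 * Coalescing limits: on 5705 and newer chips the per-irq tick/frame
 * limits and the statistics-block interval are unsupported, so nonzero
 * values for those fields are rejected, and rx (or tx) usecs and
 * max-frames may not both be zero since that would disable rx (tx)
 * interrupts entirely.  From user space this maps to something like
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5".
 */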
10567
Jeff Garzik7282d492006-09-13 14:30:00 -040010568static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010569 .get_settings = tg3_get_settings,
10570 .set_settings = tg3_set_settings,
10571 .get_drvinfo = tg3_get_drvinfo,
10572 .get_regs_len = tg3_get_regs_len,
10573 .get_regs = tg3_get_regs,
10574 .get_wol = tg3_get_wol,
10575 .set_wol = tg3_set_wol,
10576 .get_msglevel = tg3_get_msglevel,
10577 .set_msglevel = tg3_set_msglevel,
10578 .nway_reset = tg3_nway_reset,
10579 .get_link = ethtool_op_get_link,
10580 .get_eeprom_len = tg3_get_eeprom_len,
10581 .get_eeprom = tg3_get_eeprom,
10582 .set_eeprom = tg3_set_eeprom,
10583 .get_ringparam = tg3_get_ringparam,
10584 .set_ringparam = tg3_set_ringparam,
10585 .get_pauseparam = tg3_get_pauseparam,
10586 .set_pauseparam = tg3_set_pauseparam,
10587 .get_rx_csum = tg3_get_rx_csum,
10588 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010589 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010590 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010591 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010592 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010593 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010594 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010595 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010596 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010597 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010598 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010599};
10600
10601static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10602{
Michael Chan1b277772006-03-20 22:27:48 -080010603 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010604
10605 tp->nvram_size = EEPROM_CHIP_SIZE;
10606
Michael Chan18201802006-03-20 22:29:15 -080010607 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010608 return;
10609
Michael Chanb16250e2006-09-27 16:10:14 -070010610 if ((magic != TG3_EEPROM_MAGIC) &&
10611 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10612 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010613 return;
10614
10615 /*
10616 * Size the chip by reading offsets at increasing powers of two.
10617 * When we encounter our validation signature, we know the addressing
10618 * has wrapped around, and thus have our chip size.
10619 */
Michael Chan1b277772006-03-20 22:27:48 -080010620 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010621
10622 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010623 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010624 return;
10625
Michael Chan18201802006-03-20 22:29:15 -080010626 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010627 break;
10628
10629 cursize <<= 1;
10630 }
10631
10632 tp->nvram_size = cursize;
10633}
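/*
 * The sizing loop starts at offset 0x10 and doubles the offset each
 * pass; when the word read back equals the magic value found at
 * offset 0, the address has wrapped and the current offset is the
 * EEPROM size.
 */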
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010634
Linus Torvalds1da177e2005-04-16 15:20:36 -070010635static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10636{
10637 u32 val;
10638
Michael Chan18201802006-03-20 22:29:15 -080010639 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010640 return;
10641
10642 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010643 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010644 tg3_get_eeprom_size(tp);
10645 return;
10646 }
10647
Linus Torvalds1da177e2005-04-16 15:20:36 -070010648 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10649 if (val != 0) {
10650 tp->nvram_size = (val >> 16) * 1024;
10651 return;
10652 }
10653 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010654 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010655}
10656
10657static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10658{
10659 u32 nvcfg1;
10660
10661 nvcfg1 = tr32(NVRAM_CFG1);
10662 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10663 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10664 }
10665 else {
10666 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10667 tw32(NVRAM_CFG1, nvcfg1);
10668 }
10669
Michael Chan4c987482005-09-05 17:52:38 -070010670 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010671 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010672 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10673 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10674 tp->nvram_jedecnum = JEDEC_ATMEL;
10675 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10676 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10677 break;
10678 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10679 tp->nvram_jedecnum = JEDEC_ATMEL;
10680 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10681 break;
10682 case FLASH_VENDOR_ATMEL_EEPROM:
10683 tp->nvram_jedecnum = JEDEC_ATMEL;
10684 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10685 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10686 break;
10687 case FLASH_VENDOR_ST:
10688 tp->nvram_jedecnum = JEDEC_ST;
10689 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10690 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10691 break;
10692 case FLASH_VENDOR_SAIFUN:
10693 tp->nvram_jedecnum = JEDEC_SAIFUN;
10694 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10695 break;
10696 case FLASH_VENDOR_SST_SMALL:
10697 case FLASH_VENDOR_SST_LARGE:
10698 tp->nvram_jedecnum = JEDEC_SST;
10699 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10700 break;
10701 }
10702 }
10703 else {
10704 tp->nvram_jedecnum = JEDEC_ATMEL;
10705 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10706 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10707 }
10708}
10709
Michael Chan361b4ac2005-04-21 17:11:21 -070010710static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10711{
10712 u32 nvcfg1;
10713
10714 nvcfg1 = tr32(NVRAM_CFG1);
10715
Michael Chane6af3012005-04-21 17:12:05 -070010716 /* NVRAM protection for TPM */
10717 if (nvcfg1 & (1 << 27))
10718 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10719
Michael Chan361b4ac2005-04-21 17:11:21 -070010720 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10721 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10722 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10723 tp->nvram_jedecnum = JEDEC_ATMEL;
10724 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10725 break;
10726 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10727 tp->nvram_jedecnum = JEDEC_ATMEL;
10728 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10729 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10730 break;
10731 case FLASH_5752VENDOR_ST_M45PE10:
10732 case FLASH_5752VENDOR_ST_M45PE20:
10733 case FLASH_5752VENDOR_ST_M45PE40:
10734 tp->nvram_jedecnum = JEDEC_ST;
10735 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10736 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10737 break;
10738 }
10739
10740 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10741 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10742 case FLASH_5752PAGE_SIZE_256:
10743 tp->nvram_pagesize = 256;
10744 break;
10745 case FLASH_5752PAGE_SIZE_512:
10746 tp->nvram_pagesize = 512;
10747 break;
10748 case FLASH_5752PAGE_SIZE_1K:
10749 tp->nvram_pagesize = 1024;
10750 break;
10751 case FLASH_5752PAGE_SIZE_2K:
10752 tp->nvram_pagesize = 2048;
10753 break;
10754 case FLASH_5752PAGE_SIZE_4K:
10755 tp->nvram_pagesize = 4096;
10756 break;
10757 case FLASH_5752PAGE_SIZE_264:
10758 tp->nvram_pagesize = 264;
10759 break;
10760 }
10761 }
10762 else {
10763 /* For eeprom, set pagesize to maximum eeprom size */
10764 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10765
10766 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10767 tw32(NVRAM_CFG1, nvcfg1);
10768 }
10769}
10770
Michael Chand3c7b882006-03-23 01:28:25 -080010771static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10772{
Matt Carlson989a9d22007-05-05 11:51:05 -070010773 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010774
10775 nvcfg1 = tr32(NVRAM_CFG1);
10776
10777 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010778 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010779 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010780 protect = 1;
10781 }
Michael Chand3c7b882006-03-23 01:28:25 -080010782
Matt Carlson989a9d22007-05-05 11:51:05 -070010783 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10784 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010785 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10786 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10787 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010788 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010789 tp->nvram_jedecnum = JEDEC_ATMEL;
10790 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10791 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10792 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010793 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10794 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010795 tp->nvram_size = (protect ? 0x3e200 :
10796 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010797 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010798 tp->nvram_size = (protect ? 0x1f200 :
10799 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010800 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010801 tp->nvram_size = (protect ? 0x1f200 :
10802 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010803 break;
10804 case FLASH_5752VENDOR_ST_M45PE10:
10805 case FLASH_5752VENDOR_ST_M45PE20:
10806 case FLASH_5752VENDOR_ST_M45PE40:
10807 tp->nvram_jedecnum = JEDEC_ST;
10808 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10809 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10810 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010811 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010812 tp->nvram_size = (protect ?
10813 TG3_NVRAM_SIZE_64KB :
10814 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010815 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010816 tp->nvram_size = (protect ?
10817 TG3_NVRAM_SIZE_64KB :
10818 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010819 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010820 tp->nvram_size = (protect ?
10821 TG3_NVRAM_SIZE_128KB :
10822 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010823 break;
10824 }
10825}
10826
Michael Chan1b277772006-03-20 22:27:48 -080010827static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10828{
10829 u32 nvcfg1;
10830
10831 nvcfg1 = tr32(NVRAM_CFG1);
10832
10833 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10834 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10835 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10836 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10837 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10838 tp->nvram_jedecnum = JEDEC_ATMEL;
10839 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10840 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10841
10842 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10843 tw32(NVRAM_CFG1, nvcfg1);
10844 break;
10845 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10846 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10847 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10848 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10849 tp->nvram_jedecnum = JEDEC_ATMEL;
10850 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10851 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10852 tp->nvram_pagesize = 264;
10853 break;
10854 case FLASH_5752VENDOR_ST_M45PE10:
10855 case FLASH_5752VENDOR_ST_M45PE20:
10856 case FLASH_5752VENDOR_ST_M45PE40:
10857 tp->nvram_jedecnum = JEDEC_ST;
10858 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10859 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10860 tp->nvram_pagesize = 256;
10861 break;
10862 }
10863}
10864
Matt Carlson6b91fa02007-10-10 18:01:09 -070010865static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10866{
10867 u32 nvcfg1, protect = 0;
10868
10869 nvcfg1 = tr32(NVRAM_CFG1);
10870
10871 /* NVRAM protection for TPM */
10872 if (nvcfg1 & (1 << 27)) {
10873 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10874 protect = 1;
10875 }
10876
10877 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10878 switch (nvcfg1) {
10879 case FLASH_5761VENDOR_ATMEL_ADB021D:
10880 case FLASH_5761VENDOR_ATMEL_ADB041D:
10881 case FLASH_5761VENDOR_ATMEL_ADB081D:
10882 case FLASH_5761VENDOR_ATMEL_ADB161D:
10883 case FLASH_5761VENDOR_ATMEL_MDB021D:
10884 case FLASH_5761VENDOR_ATMEL_MDB041D:
10885 case FLASH_5761VENDOR_ATMEL_MDB081D:
10886 case FLASH_5761VENDOR_ATMEL_MDB161D:
10887 tp->nvram_jedecnum = JEDEC_ATMEL;
10888 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10889 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10890 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10891 tp->nvram_pagesize = 256;
10892 break;
10893 case FLASH_5761VENDOR_ST_A_M45PE20:
10894 case FLASH_5761VENDOR_ST_A_M45PE40:
10895 case FLASH_5761VENDOR_ST_A_M45PE80:
10896 case FLASH_5761VENDOR_ST_A_M45PE16:
10897 case FLASH_5761VENDOR_ST_M_M45PE20:
10898 case FLASH_5761VENDOR_ST_M_M45PE40:
10899 case FLASH_5761VENDOR_ST_M_M45PE80:
10900 case FLASH_5761VENDOR_ST_M_M45PE16:
10901 tp->nvram_jedecnum = JEDEC_ST;
10902 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10903 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10904 tp->nvram_pagesize = 256;
10905 break;
10906 }
10907
10908 if (protect) {
10909 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10910 } else {
10911 switch (nvcfg1) {
10912 case FLASH_5761VENDOR_ATMEL_ADB161D:
10913 case FLASH_5761VENDOR_ATMEL_MDB161D:
10914 case FLASH_5761VENDOR_ST_A_M45PE16:
10915 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010916 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010917 break;
10918 case FLASH_5761VENDOR_ATMEL_ADB081D:
10919 case FLASH_5761VENDOR_ATMEL_MDB081D:
10920 case FLASH_5761VENDOR_ST_A_M45PE80:
10921 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010922 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010923 break;
10924 case FLASH_5761VENDOR_ATMEL_ADB041D:
10925 case FLASH_5761VENDOR_ATMEL_MDB041D:
10926 case FLASH_5761VENDOR_ST_A_M45PE40:
10927 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010928 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010929 break;
10930 case FLASH_5761VENDOR_ATMEL_ADB021D:
10931 case FLASH_5761VENDOR_ATMEL_MDB021D:
10932 case FLASH_5761VENDOR_ST_A_M45PE20:
10933 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010934 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010935 break;
10936 }
10937 }
10938}
10939
Michael Chanb5d37722006-09-27 16:06:21 -070010940static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10941{
10942 tp->nvram_jedecnum = JEDEC_ATMEL;
10943 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10944 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10945}
10946
Linus Torvalds1da177e2005-04-16 15:20:36 -070010947/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10948static void __devinit tg3_nvram_init(struct tg3 *tp)
10949{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010950 tw32_f(GRC_EEPROM_ADDR,
10951 (EEPROM_ADDR_FSM_RESET |
10952 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10953 EEPROM_ADDR_CLKPERD_SHIFT)));
10954
Michael Chan9d57f012006-12-07 00:23:25 -080010955 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010956
10957 /* Enable seeprom accesses. */
10958 tw32_f(GRC_LOCAL_CTRL,
10959 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10960 udelay(100);
10961
10962 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10963 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10964 tp->tg3_flags |= TG3_FLAG_NVRAM;
10965
Michael Chanec41c7d2006-01-17 02:40:55 -080010966 if (tg3_nvram_lock(tp)) {
10967 			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10968 "tg3_nvram_init failed.\n", tp->dev->name);
10969 return;
10970 }
Michael Chane6af3012005-04-21 17:12:05 -070010971 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010972
Matt Carlson989a9d22007-05-05 11:51:05 -070010973 tp->nvram_size = 0;
10974
Michael Chan361b4ac2005-04-21 17:11:21 -070010975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10976 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010977 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10978 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010979 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010982 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010983 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10984 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010985 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10986 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010987 else
10988 tg3_get_nvram_info(tp);
10989
Matt Carlson989a9d22007-05-05 11:51:05 -070010990 if (tp->nvram_size == 0)
10991 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010992
Michael Chane6af3012005-04-21 17:12:05 -070010993 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010994 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010995
10996 } else {
10997 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10998
10999 tg3_get_eeprom_size(tp);
11000 }
11001}
11002
11003static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
11004 u32 offset, u32 *val)
11005{
11006 u32 tmp;
11007 int i;
11008
11009 if (offset > EEPROM_ADDR_ADDR_MASK ||
11010 (offset % 4) != 0)
11011 return -EINVAL;
11012
11013 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
11014 EEPROM_ADDR_DEVID_MASK |
11015 EEPROM_ADDR_READ);
11016 tw32(GRC_EEPROM_ADDR,
11017 tmp |
11018 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11019 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
11020 EEPROM_ADDR_ADDR_MASK) |
11021 EEPROM_ADDR_READ | EEPROM_ADDR_START);
11022
Michael Chan9d57f012006-12-07 00:23:25 -080011023 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011024 tmp = tr32(GRC_EEPROM_ADDR);
11025
11026 if (tmp & EEPROM_ADDR_COMPLETE)
11027 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011028 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011029 }
11030 if (!(tmp & EEPROM_ADDR_COMPLETE))
11031 return -EBUSY;
11032
11033 *val = tr32(GRC_EEPROM_DATA);
11034 return 0;
11035}
11036
11037#define NVRAM_CMD_TIMEOUT 10000
11038
11039static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
11040{
11041 int i;
11042
11043 tw32(NVRAM_CMD, nvram_cmd);
11044 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
11045 udelay(10);
11046 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
11047 udelay(10);
11048 break;
11049 }
11050 }
11051 if (i == NVRAM_CMD_TIMEOUT) {
11052 return -EBUSY;
11053 }
11054 return 0;
11055}
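/*
 * tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE every 10 usec, so the
 * NVRAM_CMD_TIMEOUT of 10000 iterations bounds a single NVRAM command
 * at roughly 100 ms before -EBUSY is returned.
 */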
11056
Michael Chan18201802006-03-20 22:29:15 -080011057static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
11058{
11059 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11060 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11061 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070011062 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080011063 (tp->nvram_jedecnum == JEDEC_ATMEL))
11064
11065 addr = ((addr / tp->nvram_pagesize) <<
11066 ATMEL_AT45DB0X1B_PAGE_POS) +
11067 (addr % tp->nvram_pagesize);
11068
11069 return addr;
11070}
11071
Michael Chanc4e65752006-03-20 22:29:32 -080011072static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
11073{
11074 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
11075 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
11076 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070011077 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080011078 (tp->nvram_jedecnum == JEDEC_ATMEL))
11079
11080 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
11081 tp->nvram_pagesize) +
11082 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
11083
11084 return addr;
11085}
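/*
 * Address translation example: buffered Atmel flash parts without
 * linear addressing (no TG3_FLG3_NO_NVRAM_ADDR_TRANS) use the 264-byte
 * page size set above, with the page index shifted up by
 * ATMEL_AT45DB0X1B_PAGE_POS.  A linear offset of 1000 therefore maps
 * to page 3, byte 208, i.e. (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208;
 * tg3_nvram_logical_addr() performs the inverse mapping.
 */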
11086
Linus Torvalds1da177e2005-04-16 15:20:36 -070011087static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11088{
11089 int ret;
11090
Linus Torvalds1da177e2005-04-16 15:20:36 -070011091 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11092 return tg3_nvram_read_using_eeprom(tp, offset, val);
11093
Michael Chan18201802006-03-20 22:29:15 -080011094 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011095
11096 if (offset > NVRAM_ADDR_MSK)
11097 return -EINVAL;
11098
Michael Chanec41c7d2006-01-17 02:40:55 -080011099 ret = tg3_nvram_lock(tp);
11100 if (ret)
11101 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011102
Michael Chane6af3012005-04-21 17:12:05 -070011103 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011104
11105 tw32(NVRAM_ADDR, offset);
11106 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11107 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11108
11109 if (ret == 0)
11110 *val = swab32(tr32(NVRAM_RDDATA));
11111
Michael Chane6af3012005-04-21 17:12:05 -070011112 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011113
Michael Chan381291b2005-12-13 21:08:21 -080011114 tg3_nvram_unlock(tp);
11115
Linus Torvalds1da177e2005-04-16 15:20:36 -070011116 return ret;
11117}
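/*
 * Read path: EEPROM-only parts go through the GRC_EEPROM_* registers;
 * everything else translates the offset, takes the NVRAM hardware
 * lock, enables access, programs NVRAM_ADDR and issues a RD command,
 * then byte-swaps NVRAM_RDDATA into *val.  Callers that want the raw
 * register value use the swab wrapper, e.g. the format probe does
 * tg3_nvram_read_swab(tp, 0, &magic).
 */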
11118
Al Virob9fc7dc2007-12-17 22:59:57 -080011119static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11120{
11121 u32 v;
11122 int res = tg3_nvram_read(tp, offset, &v);
11123 if (!res)
11124 *val = cpu_to_le32(v);
11125 return res;
11126}
11127
Michael Chan18201802006-03-20 22:29:15 -080011128static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11129{
11130 int err;
11131 u32 tmp;
11132
11133 err = tg3_nvram_read(tp, offset, &tmp);
11134 *val = swab32(tmp);
11135 return err;
11136}
11137
Linus Torvalds1da177e2005-04-16 15:20:36 -070011138static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11139 u32 offset, u32 len, u8 *buf)
11140{
11141 int i, j, rc = 0;
11142 u32 val;
11143
11144 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011145 u32 addr;
11146 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011147
11148 addr = offset + i;
11149
11150 memcpy(&data, buf + i, 4);
11151
Al Virob9fc7dc2007-12-17 22:59:57 -080011152 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011153
11154 val = tr32(GRC_EEPROM_ADDR);
11155 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11156
11157 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11158 EEPROM_ADDR_READ);
11159 tw32(GRC_EEPROM_ADDR, val |
11160 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11161 (addr & EEPROM_ADDR_ADDR_MASK) |
11162 EEPROM_ADDR_START |
11163 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011164
Michael Chan9d57f012006-12-07 00:23:25 -080011165 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011166 val = tr32(GRC_EEPROM_ADDR);
11167
11168 if (val & EEPROM_ADDR_COMPLETE)
11169 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011170 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011171 }
11172 if (!(val & EEPROM_ADDR_COMPLETE)) {
11173 rc = -EBUSY;
11174 break;
11175 }
11176 }
11177
11178 return rc;
11179}
11180
11181/* offset and length are dword aligned */
11182static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11183 u8 *buf)
11184{
11185 int ret = 0;
11186 u32 pagesize = tp->nvram_pagesize;
11187 u32 pagemask = pagesize - 1;
11188 u32 nvram_cmd;
11189 u8 *tmp;
11190
11191 tmp = kmalloc(pagesize, GFP_KERNEL);
11192 if (tmp == NULL)
11193 return -ENOMEM;
11194
11195 while (len) {
11196 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011197 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011198
11199 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011200
Linus Torvalds1da177e2005-04-16 15:20:36 -070011201 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011202 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011203 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011204 break;
11205 }
11206 if (ret)
11207 break;
11208
11209 page_off = offset & pagemask;
11210 size = pagesize;
11211 if (len < size)
11212 size = len;
11213
11214 len -= size;
11215
11216 memcpy(tmp + page_off, buf, size);
11217
11218 offset = offset + (pagesize - page_off);
11219
Michael Chane6af3012005-04-21 17:12:05 -070011220 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011221
11222 /*
11223 * Before we can erase the flash page, we need
11224 * to issue a special "write enable" command.
11225 */
11226 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11227
11228 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11229 break;
11230
11231 /* Erase the target page */
11232 tw32(NVRAM_ADDR, phy_addr);
11233
11234 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11235 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11236
11237 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11238 break;
11239
11240 /* Issue another write enable to start the write. */
11241 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11242
11243 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11244 break;
11245
11246 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011247 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011248
Al Virob9fc7dc2007-12-17 22:59:57 -080011249 data = *((__be32 *) (tmp + j));
11250 /* swab32(le32_to_cpu(data)), actually */
11251 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011252
11253 tw32(NVRAM_ADDR, phy_addr + j);
11254
11255 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11256 NVRAM_CMD_WR;
11257
11258 if (j == 0)
11259 nvram_cmd |= NVRAM_CMD_FIRST;
11260 else if (j == (pagesize - 4))
11261 nvram_cmd |= NVRAM_CMD_LAST;
11262
11263 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11264 break;
11265 }
11266 if (ret)
11267 break;
11268 }
11269
11270 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11271 tg3_nvram_exec_cmd(tp, nvram_cmd);
11272
11273 kfree(tmp);
11274
11275 return ret;
11276}
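/*
 * Unbuffered flash can only be erased a page at a time, so the write
 * above is a read-modify-write: each affected page is read into a
 * scratch buffer, the new bytes are merged in, then WREN + page ERASE
 * + WREN are issued and the page is reprogrammed four bytes at a time
 * with NVRAM_CMD_FIRST/NVRAM_CMD_LAST bracketing the page.  A final
 * WRDI command drops write enable when the loop finishes.
 */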
11277
11278/* offset and length are dword aligned */
11279static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11280 u8 *buf)
11281{
11282 int i, ret = 0;
11283
11284 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011285 u32 page_off, phy_addr, nvram_cmd;
11286 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011287
11288 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011289 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011290
11291 page_off = offset % tp->nvram_pagesize;
11292
Michael Chan18201802006-03-20 22:29:15 -080011293 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011294
11295 tw32(NVRAM_ADDR, phy_addr);
11296
11297 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11298
11299 if ((page_off == 0) || (i == 0))
11300 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011301 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011302 nvram_cmd |= NVRAM_CMD_LAST;
11303
11304 if (i == (len - 4))
11305 nvram_cmd |= NVRAM_CMD_LAST;
11306
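		/* Some ST flash parts require an explicit write-enable
		 * command before the first word of each page.
		 */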
Michael Chan4c987482005-09-05 17:52:38 -070011307 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011308 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011309 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011310 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011311 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011312 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011313 (tp->nvram_jedecnum == JEDEC_ST) &&
11314 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011315
11316 if ((ret = tg3_nvram_exec_cmd(tp,
11317 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11318 NVRAM_CMD_DONE)))
11320 break;
11321 }
11322 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11323 /* We always do complete word writes to eeprom. */
11324 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11325 }
11326
11327 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11328 break;
11329 }
11330 return ret;
11331}
11332
11333/* offset and length are dword aligned */
11334static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11335{
11336 int ret;
11337
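	/* If GPIO1 is wired as the eeprom write-protect line, de-assert it
	 * for the duration of the write; it is restored further below.
	 */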
Linus Torvalds1da177e2005-04-16 15:20:36 -070011338 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011339 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11340 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011341 udelay(40);
11342 }
11343
11344 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11345 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11346 }
11347 else {
11348 u32 grc_mode;
11349
Michael Chanec41c7d2006-01-17 02:40:55 -080011350 ret = tg3_nvram_lock(tp);
11351 if (ret)
11352 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011353
Michael Chane6af3012005-04-21 17:12:05 -070011354 tg3_enable_nvram_access(tp);
11355 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11356 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011357 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011358
11359 grc_mode = tr32(GRC_MODE);
11360 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11361
11362 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11363 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11364
11365 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11366 buf);
11367 }
11368 else {
11369 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11370 buf);
11371 }
11372
11373 grc_mode = tr32(GRC_MODE);
11374 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11375
Michael Chane6af3012005-04-21 17:12:05 -070011376 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011377 tg3_nvram_unlock(tp);
11378 }
11379
11380 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011381 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011382 udelay(40);
11383 }
11384
11385 return ret;
11386}
11387
11388struct subsys_tbl_ent {
11389 u16 subsys_vendor, subsys_devid;
11390 u32 phy_id;
11391};
11392
11393static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11394 /* Broadcom boards. */
11395 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11396 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11397 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11398 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11399 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11400 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11401 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11402 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11403 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11404 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11405 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11406
11407 /* 3com boards. */
11408 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11409 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11410 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11411 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11412 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11413
11414 /* DELL boards. */
11415 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11416 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11417 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11418 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11419
11420 /* Compaq boards. */
11421 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11422 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11423 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11424 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11425 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11426
11427 /* IBM boards. */
11428 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11429};
11430
11431static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11432{
11433 int i;
11434
11435 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11436 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11437 tp->pdev->subsystem_vendor) &&
11438 (subsys_id_to_phy_id[i].subsys_devid ==
11439 tp->pdev->subsystem_device))
11440 return &subsys_id_to_phy_id[i];
11441 }
11442 return NULL;
11443}
11444
Michael Chan7d0c41e2005-04-21 17:06:20 -070011445static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011446{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011447 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011448 u16 pmcsr;
11449
11450 /* On some early chips the SRAM cannot be accessed in D3hot state,
11451	 * so we need to make sure we're in D0.
11452 */
11453 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11454 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11455 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11456 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011457
11458 /* Make sure register accesses (indirect or otherwise)
11459 * will function correctly.
11460 */
11461 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11462 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011463
David S. Millerf49639e2006-06-09 11:58:36 -070011464 /* The memory arbiter has to be enabled in order for SRAM accesses
11465 * to succeed. Normally on powerup the tg3 chip firmware will make
11466 * sure it is enabled, but other entities such as system netboot
11467 * code might disable it.
11468 */
11469 val = tr32(MEMARB_MODE);
11470 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11471
Linus Torvalds1da177e2005-04-16 15:20:36 -070011472 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011473 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11474
Gary Zambranoa85feb82007-05-05 11:52:19 -070011475 /* Assume an onboard device and WOL capable by default. */
11476 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011477
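	/* 5906 parts do not use the NIC SRAM config block; the LOM
	 * indication and the WOL/ASPM settings come from dedicated
	 * registers, so handle them here and skip the SRAM signature
	 * check below.
	 */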
Michael Chanb5d37722006-09-27 16:06:21 -070011478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011479 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011480 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011481 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11482 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011483 val = tr32(VCPU_CFGSHDW);
11484 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011485 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011486 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011487 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11488 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011489 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011490 goto done;
Michael Chanb5d37722006-09-27 16:06:21 -070011491 }
11492
Linus Torvalds1da177e2005-04-16 15:20:36 -070011493 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11494 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11495 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011496 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011497 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011498
11499 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11500 tp->nic_sram_data_cfg = nic_cfg;
11501
11502 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11503 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11504 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11505 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11506 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11507 (ver > 0) && (ver < 0x100))
11508 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11509
Matt Carlsona9daf362008-05-25 23:49:44 -070011510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11511 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11512
Linus Torvalds1da177e2005-04-16 15:20:36 -070011513 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11514 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11515 eeprom_phy_serdes = 1;
11516
11517 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11518 if (nic_phy_id != 0) {
11519 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11520 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11521
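			/* Repack the SRAM phy id into the driver's PHY_ID
			 * layout, the same layout tg3_phy_probe() assembles
			 * from the MII_PHYSID1/PHYSID2 registers.
			 */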
11522 eeprom_phy_id = (id1 >> 16) << 10;
11523 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11524 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11525 } else
11526 eeprom_phy_id = 0;
11527
Michael Chan7d0c41e2005-04-21 17:06:20 -070011528 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011529 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011530 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011531 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11532 else
11533 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11534 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011535
John W. Linvillecbf46852005-04-21 17:01:29 -070011536 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011537 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11538 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011539 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011540 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11541
11542 switch (led_cfg) {
11543 default:
11544 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11545 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11546 break;
11547
11548 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11549 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11550 break;
11551
11552 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11553 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011554
11555 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11556 * read on some older 5700/5701 bootcode.
11557 */
11558 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11559 ASIC_REV_5700 ||
11560 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11561 ASIC_REV_5701)
11562 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11563
Linus Torvalds1da177e2005-04-16 15:20:36 -070011564 break;
11565
11566 case SHASTA_EXT_LED_SHARED:
11567 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11568 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11569 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11570 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11571 LED_CTRL_MODE_PHY_2);
11572 break;
11573
11574 case SHASTA_EXT_LED_MAC:
11575 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11576 break;
11577
11578 case SHASTA_EXT_LED_COMBO:
11579 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11580 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11581 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11582 LED_CTRL_MODE_PHY_2);
11583 break;
11584
Stephen Hemminger855e1112008-04-16 16:37:28 -070011585 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011586
11587 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11589 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11590 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11591
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011592 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11593 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011594
Michael Chan9d26e212006-12-07 00:21:14 -080011595 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011596 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011597 if ((tp->pdev->subsystem_vendor ==
11598 PCI_VENDOR_ID_ARIMA) &&
11599 (tp->pdev->subsystem_device == 0x205a ||
11600 tp->pdev->subsystem_device == 0x2063))
11601 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11602 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011603 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011604 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11605 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011606
11607 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11608 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011609 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011610 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11611 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011612
11613 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11614 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011615 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011616
Gary Zambranoa85feb82007-05-05 11:52:19 -070011617 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11618 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11619 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011620
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011621 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011622 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
Matt Carlson0527ba32007-10-10 18:03:30 -070011623 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11624
Linus Torvalds1da177e2005-04-16 15:20:36 -070011625 if (cfg2 & (1 << 17))
11626 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11627
11628 /* serdes signal pre-emphasis in register 0x590 set by */
11629 /* bootcode if bit 18 is set */
11630 if (cfg2 & (1 << 18))
11631 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011632
Matt Carlson6833c042008-11-21 17:18:59 -080011633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11634 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX &&
11635 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11636 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11637
Matt Carlson8ed5d972007-05-07 00:25:49 -070011638 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11639 u32 cfg3;
11640
11641 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11642 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11643 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11644 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011645
11646 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11647 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11648 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11649 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11650 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11651 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011652 }
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011653done:
11654 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11655 device_set_wakeup_enable(&tp->pdev->dev,
11656 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011657}
11658
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011659static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11660{
11661 int i;
11662 u32 val;
11663
11664 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11665 tw32(OTP_CTRL, cmd);
11666
11667 /* Wait for up to 1 ms for command to execute. */
11668 for (i = 0; i < 100; i++) {
11669 val = tr32(OTP_STATUS);
11670 if (val & OTP_STATUS_CMD_DONE)
11671 break;
11672 udelay(10);
11673 }
11674
11675 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11676}
11677
11678/* Read the gphy configuration from the OTP region of the chip. The gphy
11679 * configuration is a 32-bit value that straddles the alignment boundary.
11680 * We do two 32-bit reads and then shift and merge the results.
11681 */
11682static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11683{
11684 u32 bhalf_otp, thalf_otp;
11685
11686 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11687
11688 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11689 return 0;
11690
11691 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11692
11693 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11694 return 0;
11695
11696 thalf_otp = tr32(OTP_READ_DATA);
11697
11698 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11699
11700 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11701 return 0;
11702
11703 bhalf_otp = tr32(OTP_READ_DATA);
11704
11705 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11706}
11707
Michael Chan7d0c41e2005-04-21 17:06:20 -070011708static int __devinit tg3_phy_probe(struct tg3 *tp)
11709{
11710 u32 hw_phy_id_1, hw_phy_id_2;
11711 u32 hw_phy_id, hw_phy_id_masked;
11712 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011713
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011714 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11715 return tg3_phy_init(tp);
11716
Linus Torvalds1da177e2005-04-16 15:20:36 -070011717 /* Reading the PHY ID register can conflict with ASF
11718	 * firmware access to the PHY hardware.
11719 */
11720 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011721 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11722 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011723 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11724 } else {
11725 /* Now read the physical PHY_ID from the chip and verify
11726 * that it is sane. If it doesn't look good, we fall back
11727		 * to either the hard-coded table based PHY_ID or, failing
11728		 * that, the value found in the eeprom area.
11729 */
11730 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11731 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11732
11733 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11734 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11735 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11736
11737 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11738 }
11739
11740 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11741 tp->phy_id = hw_phy_id;
11742 if (hw_phy_id_masked == PHY_ID_BCM8002)
11743 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011744 else
11745 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011746 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011747 if (tp->phy_id != PHY_ID_INVALID) {
11748 /* Do nothing, phy ID already set up in
11749 * tg3_get_eeprom_hw_cfg().
11750 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011751 } else {
11752 struct subsys_tbl_ent *p;
11753
11754 /* No eeprom signature? Try the hardcoded
11755 * subsys device table.
11756 */
11757 p = lookup_by_subsys(tp);
11758 if (!p)
11759 return -ENODEV;
11760
11761 tp->phy_id = p->phy_id;
11762 if (!tp->phy_id ||
11763 tp->phy_id == PHY_ID_BCM8002)
11764 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11765 }
11766 }
11767
Michael Chan747e8f82005-07-25 12:33:22 -070011768 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011769 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011770 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011771 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011772
11773 tg3_readphy(tp, MII_BMSR, &bmsr);
11774 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11775 (bmsr & BMSR_LSTATUS))
11776 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011777
Linus Torvalds1da177e2005-04-16 15:20:36 -070011778 err = tg3_phy_reset(tp);
11779 if (err)
11780 return err;
11781
11782 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11783 ADVERTISE_100HALF | ADVERTISE_100FULL |
11784 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11785 tg3_ctrl = 0;
11786 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11787 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11788 MII_TG3_CTRL_ADV_1000_FULL);
11789 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11790 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11791 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11792 MII_TG3_CTRL_ENABLE_AS_MASTER);
11793 }
11794
Michael Chan3600d912006-12-07 00:21:48 -080011795 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11796 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11797 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11798 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011799 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11800
11801 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11802 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11803
11804 tg3_writephy(tp, MII_BMCR,
11805 BMCR_ANENABLE | BMCR_ANRESTART);
11806 }
11807 tg3_phy_set_wirespeed(tp);
11808
11809 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11810 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11811 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11812 }
11813
11814skip_phy_reset:
11815 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11816 err = tg3_init_5401phy_dsp(tp);
11817 if (err)
11818 return err;
11819 }
11820
11821 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11822 err = tg3_init_5401phy_dsp(tp);
11823 }
11824
Michael Chan747e8f82005-07-25 12:33:22 -070011825 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011826 tp->link_config.advertising =
11827 (ADVERTISED_1000baseT_Half |
11828 ADVERTISED_1000baseT_Full |
11829 ADVERTISED_Autoneg |
11830 ADVERTISED_FIBRE);
11831 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11832 tp->link_config.advertising &=
11833 ~(ADVERTISED_1000baseT_Half |
11834 ADVERTISED_1000baseT_Full);
11835
11836 return err;
11837}
11838
11839static void __devinit tg3_read_partno(struct tg3 *tp)
11840{
11841 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011842 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011843 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011844
Michael Chan18201802006-03-20 22:29:15 -080011845 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011846 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011847
Michael Chan18201802006-03-20 22:29:15 -080011848 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011849 for (i = 0; i < 256; i += 4) {
11850 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011851
Michael Chan1b277772006-03-20 22:27:48 -080011852 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11853 goto out_not_found;
11854
11855 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11856 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11857 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11858 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11859 }
11860 } else {
11861 int vpd_cap;
11862
11863 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11864 for (i = 0; i < 256; i += 4) {
11865 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011866 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011867 u16 tmp16;
11868
11869 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11870 i);
11871 while (j++ < 100) {
11872 pci_read_config_word(tp->pdev, vpd_cap +
11873 PCI_VPD_ADDR, &tmp16);
11874 if (tmp16 & 0x8000)
11875 break;
11876 msleep(1);
11877 }
David S. Millerf49639e2006-06-09 11:58:36 -070011878 if (!(tmp16 & 0x8000))
11879 goto out_not_found;
11880
Michael Chan1b277772006-03-20 22:27:48 -080011881 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11882 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011883 v = cpu_to_le32(tmp);
11884 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011885 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011886 }
11887
11888 /* Now parse and find the part number. */
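	/* The VPD area is a series of large-resource descriptors:
	 * 0x82 = identifier string, 0x90 = read-only (VPD-R) data,
	 * 0x91 = read/write (VPD-W) data.  Skip ahead to the VPD-R
	 * block and scan its keywords for "PN" (part number).
	 */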
Michael Chanaf2c6a42006-11-07 14:57:51 -080011889 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011890 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011891 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011892
11893 if (val == 0x82 || val == 0x91) {
11894 i = (i + 3 +
11895 (vpd_data[i + 1] +
11896 (vpd_data[i + 2] << 8)));
11897 continue;
11898 }
11899
11900 if (val != 0x90)
11901 goto out_not_found;
11902
11903 block_end = (i + 3 +
11904 (vpd_data[i + 1] +
11905 (vpd_data[i + 2] << 8)));
11906 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011907
11908 if (block_end > 256)
11909 goto out_not_found;
11910
11911 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011912 if (vpd_data[i + 0] == 'P' &&
11913 vpd_data[i + 1] == 'N') {
11914 int partno_len = vpd_data[i + 2];
11915
Michael Chanaf2c6a42006-11-07 14:57:51 -080011916 i += 3;
11917 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011918 goto out_not_found;
11919
11920 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011921 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011922
11923 /* Success. */
11924 return;
11925 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011926 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011927 }
11928
11929 /* Part number not found. */
11930 goto out_not_found;
11931 }
11932
11933out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11935 strcpy(tp->board_part_number, "BCM95906");
11936 else
11937 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011938}
11939
Matt Carlson9c8a6202007-10-21 16:16:08 -070011940static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11941{
11942 u32 val;
11943
11944 if (tg3_nvram_read_swab(tp, offset, &val) ||
11945 (val & 0xfc000000) != 0x0c000000 ||
11946 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11947 val != 0)
11948 return 0;
11949
11950 return 1;
11951}
11952
Matt Carlsondfe00d72008-11-21 17:19:41 -080011953static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
11954{
11955 u32 offset, major, minor, build;
11956
11957 tp->fw_ver[0] = 's';
11958 tp->fw_ver[1] = 'b';
11959 tp->fw_ver[2] = '\0';
11960
11961 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
11962 return;
11963
11964 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
11965 case TG3_EEPROM_SB_REVISION_0:
11966 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
11967 break;
11968 case TG3_EEPROM_SB_REVISION_2:
11969 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
11970 break;
11971 case TG3_EEPROM_SB_REVISION_3:
11972 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
11973 break;
11974 default:
11975 return;
11976 }
11977
11978 if (tg3_nvram_read_swab(tp, offset, &val))
11979 return;
11980
11981 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
11982 TG3_EEPROM_SB_EDH_BLD_SHFT;
11983 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
11984 TG3_EEPROM_SB_EDH_MAJ_SHFT;
11985 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
11986
11987 if (minor > 99 || build > 26)
11988 return;
11989
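	/* The result has the form "sb vM.mm", with a trailing letter
	 * appended for a non-zero build number, e.g. "sb v1.03a".
	 */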
11990 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
11991
11992 if (build > 0) {
11993 tp->fw_ver[8] = 'a' + build - 1;
11994 tp->fw_ver[9] = '\0';
11995 }
11996}
11997
Michael Chanc4e65752006-03-20 22:29:32 -080011998static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11999{
12000 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070012001 u32 ver_offset;
12002 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080012003
12004 if (tg3_nvram_read_swab(tp, 0, &val))
12005 return;
12006
Matt Carlsondfe00d72008-11-21 17:19:41 -080012007 if (val != TG3_EEPROM_MAGIC) {
12008 if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12009 tg3_read_sb_ver(tp, val);
12010
Michael Chanc4e65752006-03-20 22:29:32 -080012011 return;
Matt Carlsondfe00d72008-11-21 17:19:41 -080012012 }
Michael Chanc4e65752006-03-20 22:29:32 -080012013
12014 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
12015 tg3_nvram_read_swab(tp, 0x4, &start))
12016 return;
12017
12018 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070012019
12020 if (!tg3_fw_img_is_valid(tp, offset) ||
12021 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080012022 return;
12023
Matt Carlson9c8a6202007-10-21 16:16:08 -070012024 offset = offset + ver_offset - start;
12025 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080012026 __le32 v;
12027 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080012028 return;
12029
Al Virob9fc7dc2007-12-17 22:59:57 -080012030 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080012031 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070012032
12033 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080012034 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070012035 return;
12036
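	/* ASF management firmware is in use: locate its directory entry
	 * in NVRAM and append that image's version string as well.
	 */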
12037 for (offset = TG3_NVM_DIR_START;
12038 offset < TG3_NVM_DIR_END;
12039 offset += TG3_NVM_DIRENT_SIZE) {
12040 if (tg3_nvram_read_swab(tp, offset, &val))
12041 return;
12042
12043 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12044 break;
12045 }
12046
12047 if (offset == TG3_NVM_DIR_END)
12048 return;
12049
12050 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12051 start = 0x08000000;
12052 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
12053 return;
12054
12055 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
12056 !tg3_fw_img_is_valid(tp, offset) ||
12057 tg3_nvram_read_swab(tp, offset + 8, &val))
12058 return;
12059
12060 offset += val - start;
12061
12062 bcnt = strlen(tp->fw_ver);
12063
12064 tp->fw_ver[bcnt++] = ',';
12065 tp->fw_ver[bcnt++] = ' ';
12066
12067 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080012068 __le32 v;
12069 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070012070 return;
12071
Al Virob9fc7dc2007-12-17 22:59:57 -080012072 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070012073
Al Virob9fc7dc2007-12-17 22:59:57 -080012074 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
12075 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070012076 break;
12077 }
12078
Al Virob9fc7dc2007-12-17 22:59:57 -080012079 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
12080 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070012081 }
12082
12083 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080012084}
12085
Michael Chan7544b092007-05-05 13:08:32 -070012086static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12087
Linus Torvalds1da177e2005-04-16 15:20:36 -070012088static int __devinit tg3_get_invariants(struct tg3 *tp)
12089{
12090 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012091 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12092 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070012093 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12094 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070012095 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12096 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070012097 { },
12098 };
12099 u32 misc_ctrl_reg;
12100 u32 cacheline_sz_reg;
12101 u32 pci_state_reg, grc_misc_cfg;
12102 u32 val;
12103 u16 pci_cmd;
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012104 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012105
Linus Torvalds1da177e2005-04-16 15:20:36 -070012106 /* Force memory write invalidate off. If we leave it on,
12107 * then on 5700_BX chips we have to enable a workaround.
12108 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12109	 * to match the cacheline size. The Broadcom driver has this
12110	 * workaround but turns MWI off at all times, so it never uses
12111 * it. This seems to suggest that the workaround is insufficient.
12112 */
12113 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12114 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12115 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12116
12117 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12118 * has the register indirect write enable bit set before
12119 * we try to access any of the MMIO registers. It is also
12120 * critical that the PCI-X hw workaround situation is decided
12121 * before that as well.
12122 */
12123 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12124 &misc_ctrl_reg);
12125
12126 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12127 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070012128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12129 u32 prod_id_asic_rev;
12130
12131 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12132 &prod_id_asic_rev);
12133 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
12134 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012135
Michael Chanff645be2005-04-21 17:09:53 -070012136 /* Wrong chip ID in 5752 A0. This code can be removed later
12137 * as A0 is not in production.
12138 */
12139 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12140 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12141
Michael Chan68929142005-08-09 20:17:14 -070012142 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12143 * we need to disable memory and use config. cycles
12144 * only to access all registers. The 5702/03 chips
12145 * can mistakenly decode the special cycles from the
12146 * ICH chipsets as memory write cycles, causing corruption
12147 * of register and memory space. Only certain ICH bridges
12148 * will drive special cycles with non-zero data during the
12149 * address phase which can fall within the 5703's address
12150 * range. This is not an ICH bug as the PCI spec allows
12151 * non-zero address during special cycles. However, only
12152 * these ICH bridges are known to drive non-zero addresses
12153 * during special cycles.
12154 *
12155 * Since special cycles do not cross PCI bridges, we only
12156 * enable this workaround if the 5703 is on the secondary
12157 * bus of these ICH bridges.
12158 */
12159 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12160 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12161 static struct tg3_dev_id {
12162 u32 vendor;
12163 u32 device;
12164 u32 rev;
12165 } ich_chipsets[] = {
12166 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12167 PCI_ANY_ID },
12168 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12169 PCI_ANY_ID },
12170 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12171 0xa },
12172 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12173 PCI_ANY_ID },
12174 { },
12175 };
12176 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12177 struct pci_dev *bridge = NULL;
12178
12179 while (pci_id->vendor != 0) {
12180 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12181 bridge);
12182 if (!bridge) {
12183 pci_id++;
12184 continue;
12185 }
12186 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070012187 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070012188 continue;
12189 }
12190 if (bridge->subordinate &&
12191 (bridge->subordinate->number ==
12192 tp->pdev->bus->number)) {
12193
12194 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12195 pci_dev_put(bridge);
12196 break;
12197 }
12198 }
12199 }
12200
Matt Carlson41588ba2008-04-19 18:12:33 -070012201 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12202 static struct tg3_dev_id {
12203 u32 vendor;
12204 u32 device;
12205 } bridge_chipsets[] = {
12206 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12207 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12208 { },
12209 };
12210 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12211 struct pci_dev *bridge = NULL;
12212
12213 while (pci_id->vendor != 0) {
12214 bridge = pci_get_device(pci_id->vendor,
12215 pci_id->device,
12216 bridge);
12217 if (!bridge) {
12218 pci_id++;
12219 continue;
12220 }
12221 if (bridge->subordinate &&
12222 (bridge->subordinate->number <=
12223 tp->pdev->bus->number) &&
12224 (bridge->subordinate->subordinate >=
12225 tp->pdev->bus->number)) {
12226 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12227 pci_dev_put(bridge);
12228 break;
12229 }
12230 }
12231 }
12232
Michael Chan4a29cc22006-03-19 13:21:12 -080012233 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12234	 * DMA addresses > 40-bit. This bridge may have additional
12235 * 57xx devices behind it in some 4-port NIC designs for example.
12236 * Any tg3 device found behind the bridge will also need the 40-bit
12237 * DMA workaround.
12238 */
Michael Chana4e2b342005-10-26 15:46:52 -070012239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12241 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012242 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012243 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012244 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012245 else {
12246 struct pci_dev *bridge = NULL;
12247
12248 do {
12249 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12250 PCI_DEVICE_ID_SERVERWORKS_EPB,
12251 bridge);
12252 if (bridge && bridge->subordinate &&
12253 (bridge->subordinate->number <=
12254 tp->pdev->bus->number) &&
12255 (bridge->subordinate->subordinate >=
12256 tp->pdev->bus->number)) {
12257 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12258 pci_dev_put(bridge);
12259 break;
12260 }
12261 } while (bridge);
12262 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012263
Linus Torvalds1da177e2005-04-16 15:20:36 -070012264 /* Initialize misc host control in PCI block. */
12265 tp->misc_host_ctrl |= (misc_ctrl_reg &
12266 MISC_HOST_CTRL_CHIPREV);
12267 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12268 tp->misc_host_ctrl);
12269
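	/* The cacheline size register packs the PCI cacheline size,
	 * latency timer, header type and BIST fields one per byte;
	 * unpack the individual fields.
	 */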
12270 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12271 &cacheline_sz_reg);
12272
12273 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12274 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12275 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12276 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12277
Michael Chan7544b092007-05-05 13:08:32 -070012278 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12279 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12280 tp->pdev_peer = tg3_find_peer(tp);
12281
John W. Linville2052da92005-04-21 16:56:08 -070012282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012290 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012291 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12292
John W. Linville1b440c562005-04-21 17:03:18 -070012293 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12294 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12295 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12296
Michael Chan5a6f3072006-03-20 22:28:05 -080012297 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012298 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12299 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12300 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12301 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12302 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12303 tp->pdev_peer == tp->pdev))
12304 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12305
Michael Chanaf36e6b2006-03-23 01:28:06 -080012306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012312 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012313 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012314 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012315 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012316 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12317 ASIC_REV_5750 &&
12318 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012319 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012320 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012321 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012322
Matt Carlsonf51f3562008-05-25 23:45:08 -070012323 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12324 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012325 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12326
Matt Carlson52f44902008-11-21 17:17:04 -080012327 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12328 &pci_state_reg);
12329
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012330 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12331 if (tp->pcie_cap != 0) {
12332 u16 lnkctl;
12333
Linus Torvalds1da177e2005-04-16 15:20:36 -070012334 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012335
12336 pcie_set_readrq(tp->pdev, 4096);
12337
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012338 pci_read_config_word(tp->pdev,
12339 tp->pcie_cap + PCI_EXP_LNKCTL,
12340 &lnkctl);
12341 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chanc7835a72006-11-15 21:14:42 -080012343 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
Matt Carlson5e7dfd02008-11-21 17:18:16 -080012344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12346 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
Michael Chanc7835a72006-11-15 21:14:42 -080012347 }
Matt Carlson52f44902008-11-21 17:17:04 -080012348 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlsonfcb389d2008-11-03 16:55:44 -080012349 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson52f44902008-11-21 17:17:04 -080012350 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12351 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12352 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12353 if (!tp->pcix_cap) {
12354 printk(KERN_ERR PFX "Cannot find PCI-X "
12355 "capability, aborting.\n");
12356 return -EIO;
12357 }
12358
12359 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12360 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12361 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012362
Michael Chan399de502005-10-03 14:02:39 -070012363 /* If we have an AMD 762 or VIA K8T800 chipset, write
12364 * reordering to the mailbox registers done by the host
12365 * controller can cause major troubles. We read back from
12366 * every mailbox register write to force the writes to be
12367 * posted to the chip in order.
12368 */
12369 if (pci_dev_present(write_reorder_chipsets) &&
12370 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12371 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12372
Linus Torvalds1da177e2005-04-16 15:20:36 -070012373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12374 tp->pci_lat_timer < 64) {
12375 tp->pci_lat_timer = 64;
12376
12377 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12378 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12379 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12380 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12381
12382 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12383 cacheline_sz_reg);
12384 }
12385
Matt Carlson52f44902008-11-21 17:17:04 -080012386 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12387 /* 5700 BX chips need to have their TX producer index
12388		 * mailboxes written twice to work around a bug.
12389 */
12390 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
Matt Carlson9974a352007-10-07 23:27:28 -070012391
Matt Carlson52f44902008-11-21 17:17:04 -080012392 /* If we are in PCI-X mode, enable register write workaround.
Linus Torvalds1da177e2005-04-16 15:20:36 -070012393 *
12394 * The workaround is to use indirect register accesses
12395		 * for all chip writes except those to mailbox registers.
12396 */
Matt Carlson52f44902008-11-21 17:17:04 -080012397 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012398 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012399
12400 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12401
12402			/* The chip can have its power management PCI config
12403 * space registers clobbered due to this bug.
12404 * So explicitly force the chip into D0 here.
12405 */
Matt Carlson9974a352007-10-07 23:27:28 -070012406 pci_read_config_dword(tp->pdev,
12407 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012408 &pm_reg);
12409 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12410 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012411 pci_write_config_dword(tp->pdev,
12412 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012413 pm_reg);
12414
12415 /* Also, force SERR#/PERR# in PCI command. */
12416 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12417 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12418 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12419 }
12420 }
12421
Linus Torvalds1da177e2005-04-16 15:20:36 -070012422 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12423 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12424 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12425 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12426
12427 /* Chip-specific fixup from Broadcom driver */
12428 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12429 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12430 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12431 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12432 }
12433
Michael Chan1ee582d2005-08-09 20:16:46 -070012434 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012435 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012436 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012437 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012438 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012439 tp->write32_tx_mbox = tg3_write32;
12440 tp->write32_rx_mbox = tg3_write32;
12441
12442 /* Various workaround register access methods */
12443 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12444 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012445 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12446 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12447 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12448 /*
12449 * Back to back register writes can cause problems on these
12450		 * chips; the workaround is to read back all reg writes
12451 * except those to mailbox regs.
12452 *
12453 * See tg3_write_indirect_reg32().
12454 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012455 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012456 }
12457
Michael Chan1ee582d2005-08-09 20:16:46 -070012458
12459 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12460 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12461 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12462 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12463 tp->write32_rx_mbox = tg3_write_flush_reg32;
12464 }
Michael Chan20094932005-08-09 20:16:32 -070012465
Michael Chan68929142005-08-09 20:17:14 -070012466 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12467 tp->read32 = tg3_read_indirect_reg32;
12468 tp->write32 = tg3_write_indirect_reg32;
12469 tp->read32_mbox = tg3_read_indirect_mbox;
12470 tp->write32_mbox = tg3_write_indirect_mbox;
12471 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12472 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12473
12474 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012475 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012476
12477 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12478 pci_cmd &= ~PCI_COMMAND_MEMORY;
12479 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12480 }
Michael Chanb5d37722006-09-27 16:06:21 -070012481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12482 tp->read32_mbox = tg3_read32_mbox_5906;
12483 tp->write32_mbox = tg3_write32_mbox_5906;
12484 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12485 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12486 }
Michael Chan68929142005-08-09 20:17:14 -070012487
Michael Chanbbadf502006-04-06 21:46:34 -070012488 if (tp->write32 == tg3_write_indirect_reg32 ||
12489 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12490 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012492 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12493
Michael Chan7d0c41e2005-04-21 17:06:20 -070012494 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012495 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012496 * determined before calling tg3_set_power_state() so that
12497 * we know whether or not to switch out of Vaux power.
12498 * When the flag is set, it means that GPIO1 is used for eeprom
12499 * write protect and also implies that it is a LOM where GPIOs
12500 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012501 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012502 tg3_get_eeprom_hw_cfg(tp);
12503
Matt Carlson0d3031d2007-10-10 18:02:43 -070012504 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12505 /* Allow reads and writes to the
12506 * APE register and memory space.
12507 */
12508 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12509 PCISTATE_ALLOW_APE_SHMEM_WR;
12510 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12511 pci_state_reg);
12512 }
12513
Matt Carlson9936bcf2007-10-10 18:03:07 -070012514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012517 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12518
Michael Chan314fba32005-04-21 17:07:04 -070012519 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12520 * GPIO1 driven high will bring 5700's external PHY out of reset.
12521 * It is also used as eeprom write protect on LOMs.
12522 */
12523 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12524 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12525 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12526 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12527 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012528 /* Unused GPIO3 must be driven as output on 5752 because there
12529 * are no pull-up resistors on unused GPIO pins.
12530 */
12531 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12532 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012533
Michael Chanaf36e6b2006-03-23 01:28:06 -080012534 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12535 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12536
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012537 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12538 /* Turn off the debug UART. */
12539 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12540 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12541 /* Keep VMain power. */
12542 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12543 GRC_LCLCTRL_GPIO_OUTPUT0;
12544 }
12545
Linus Torvalds1da177e2005-04-16 15:20:36 -070012546 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012547 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012548 if (err) {
12549 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12550 pci_name(tp->pdev));
12551 return err;
12552 }
12553
12554 /* 5700 B0 chips do not support checksumming correctly due
12555 * to hardware bugs.
12556 */
12557 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12558 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12559
Linus Torvalds1da177e2005-04-16 15:20:36 -070012560 /* Derive initial jumbo mode from MTU assigned in
12561 * ether_setup() via the alloc_etherdev() call
12562 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012563 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012564 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012565 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012566
12567 /* Determine WakeOnLan speed to use. */
12568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12569 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12570 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12571 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12572 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12573 } else {
12574 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12575 }
12576
12577 /* A few boards don't want Ethernet@WireSpeed phy feature */
12578 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12579 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12580 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012581 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012582 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012583 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012584 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12585
12586 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12587 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12588 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12589 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12590 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12591
Michael Chanc424cb22006-04-29 18:56:34 -070012592 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012595 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12596 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012597 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12598 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12599 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012600 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12601 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012602 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12603 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012604 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12605 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012606
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12608 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12609 tp->phy_otp = tg3_read_otp_phycfg(tp);
12610 if (tp->phy_otp == 0)
12611 tp->phy_otp = TG3_OTP_DEFAULT;
12612 }
12613
Matt Carlsonf51f3562008-05-25 23:45:08 -070012614 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012615 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12616 else
12617 tp->mi_mode = MAC_MI_MODE_BASE;
12618
Linus Torvalds1da177e2005-04-16 15:20:36 -070012619 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012620 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12621 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12622 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12623
Matt Carlson57e69832008-05-25 23:48:31 -070012624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12625 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12626
Matt Carlson158d7ab2008-05-29 01:37:54 -070012627 err = tg3_mdio_init(tp);
12628 if (err)
12629 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012630
12631 /* Initialize data/descriptor byte/word swapping. */
12632 val = tr32(GRC_MODE);
12633 val &= GRC_MODE_HOST_STACKUP;
12634 tw32(GRC_MODE, val | tp->grc_mode);
12635
12636 tg3_switch_clocks(tp);
12637
12638 /* Clear this out for sanity. */
12639 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12640
12641 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12642 &pci_state_reg);
12643 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12644 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12645 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12646
12647 if (chiprevid == CHIPREV_ID_5701_A0 ||
12648 chiprevid == CHIPREV_ID_5701_B0 ||
12649 chiprevid == CHIPREV_ID_5701_B2 ||
12650 chiprevid == CHIPREV_ID_5701_B5) {
12651 void __iomem *sram_base;
12652
12653 /* Write some dummy words into the SRAM status block
	12654	 * area and see if they read back correctly.  If the read-back
	12655	 * value is bad, force-enable the PCIX workaround.
12656 */
12657 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12658
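			/* The check below is purely observational: sram_base is
			 * written to 0, the adjacent word at sram_base + 4 is then
			 * flipped to 0xffffffff, and sram_base must still read back
			 * as 0.  If it does not, the neighbouring write corrupted it
			 * and the PCIX target workaround is forced on.
			 */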
12659 writel(0x00000000, sram_base);
12660 writel(0x00000000, sram_base + 4);
12661 writel(0xffffffff, sram_base + 4);
12662 if (readl(sram_base) != 0x00000000)
12663 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12664 }
12665 }
12666
12667 udelay(50);
12668 tg3_nvram_init(tp);
12669
12670 grc_misc_cfg = tr32(GRC_MISC_CFG);
12671 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12672
Linus Torvalds1da177e2005-04-16 15:20:36 -070012673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12674 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12675 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12676 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12677
David S. Millerfac9b832005-05-18 22:46:34 -070012678 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12679 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12680 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12681 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12682 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12683 HOSTCC_MODE_CLRTICK_TXBD);
12684
12685 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12686 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12687 tp->misc_host_ctrl);
12688 }
12689
Matt Carlson3bda1252008-08-15 14:08:22 -070012690 /* Preserve the APE MAC_MODE bits */
12691 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12692 tp->mac_mode = tr32(MAC_MODE) |
12693 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12694 else
12695 tp->mac_mode = TG3_DEF_MAC_MODE;
12696
Linus Torvalds1da177e2005-04-16 15:20:36 -070012697 /* these are limited to 10/100 only */
12698 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12699 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12700 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12701 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12702 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12703 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12704 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12705 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12706 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012707 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12708 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012709 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012710 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12711
12712 err = tg3_phy_probe(tp);
12713 if (err) {
12714 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12715 pci_name(tp->pdev), err);
12716 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012717 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012718 }
12719
12720 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012721 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012722
12723 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12724 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12725 } else {
12726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12727 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12728 else
12729 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12730 }
12731
12732 /* 5700 {AX,BX} chips have a broken status block link
12733 * change bit implementation, so we must use the
12734 * status register in those cases.
12735 */
12736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12737 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12738 else
12739 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12740
12741 /* The led_ctrl is set during tg3_phy_probe, here we might
12742 * have to force the link status polling mechanism based
12743 * upon subsystem IDs.
12744 */
12745 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012747 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12748 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12749 TG3_FLAG_USE_LINKCHG_REG);
12750 }
12751
12752 /* For all SERDES we poll the MAC status register. */
12753 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12754 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12755 else
12756 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12757
Matt Carlsonad829262008-11-21 17:16:16 -080012758 tp->rx_offset = NET_IP_ALIGN;
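	/* NET_IP_ALIGN is normally 2, which places the IP header on a
	 * 32-bit boundary behind the 14-byte Ethernet header.  The check
	 * below drops the offset again on 5701 chips running in PCI-X
	 * mode, which cannot use a 2-byte receive DMA offset.
	 */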
Linus Torvalds1da177e2005-04-16 15:20:36 -070012759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12760 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12761 tp->rx_offset = 0;
12762
Michael Chanf92905d2006-06-29 20:14:29 -070012763 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12764
12765 /* Increment the rx prod index on the rx std ring by at most
	12766	 * 8 for these chips to work around hw errata.
12767 */
12768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12770 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12771 tp->rx_std_max_post = 8;
12772
Matt Carlson8ed5d972007-05-07 00:25:49 -070012773 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12774 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12775 PCIE_PWR_MGMT_L1_THRESH_MSK;
12776
Linus Torvalds1da177e2005-04-16 15:20:36 -070012777 return err;
12778}
12779
David S. Miller49b6e95f2007-03-29 01:38:42 -070012780#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012781static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12782{
12783 struct net_device *dev = tp->dev;
12784 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012785 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012786 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012787 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012788
David S. Miller49b6e95f2007-03-29 01:38:42 -070012789 addr = of_get_property(dp, "local-mac-address", &len);
12790 if (addr && len == 6) {
12791 memcpy(dev->dev_addr, addr, 6);
12792 memcpy(dev->perm_addr, dev->dev_addr, 6);
12793 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012794 }
12795 return -ENODEV;
12796}
12797
12798static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12799{
12800 struct net_device *dev = tp->dev;
12801
12802 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012803 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012804 return 0;
12805}
12806#endif
12807
12808static int __devinit tg3_get_device_address(struct tg3 *tp)
12809{
12810 struct net_device *dev = tp->dev;
12811 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012812 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012813
David S. Miller49b6e95f2007-03-29 01:38:42 -070012814#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012815 if (!tg3_get_macaddr_sparc(tp))
12816 return 0;
12817#endif
12818
12819 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012820 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012821 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012822 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12823 mac_offset = 0xcc;
12824 if (tg3_nvram_lock(tp))
12825 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12826 else
12827 tg3_nvram_unlock(tp);
12828 }
Michael Chanb5d37722006-09-27 16:06:21 -070012829 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12830 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012831
12832 /* First try to get it from MAC address mailbox. */
12833 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12834 if ((hi >> 16) == 0x484b) {
12835 dev->dev_addr[0] = (hi >> 8) & 0xff;
12836 dev->dev_addr[1] = (hi >> 0) & 0xff;
12837
12838 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12839 dev->dev_addr[2] = (lo >> 24) & 0xff;
12840 dev->dev_addr[3] = (lo >> 16) & 0xff;
12841 dev->dev_addr[4] = (lo >> 8) & 0xff;
12842 dev->dev_addr[5] = (lo >> 0) & 0xff;
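		/* Illustrative decode (made-up values): hi = 0x484b0010 and
		 * lo = 0x18a2b3c4 would yield the MAC address 00:10:18:a2:b3:c4;
		 * the 0x484b ("HK") signature in the upper half of hi marks the
		 * mailbox contents as valid.
		 */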
Linus Torvalds1da177e2005-04-16 15:20:36 -070012843
Michael Chan008652b2006-03-27 23:14:53 -080012844 /* Some old bootcode may report a 0 MAC address in SRAM */
12845 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12846 }
12847 if (!addr_ok) {
12848 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012849 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012850 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12851 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12852 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12853 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12854 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12855 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12856 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12857 }
12858 /* Finally just fetch it out of the MAC control regs. */
12859 else {
12860 hi = tr32(MAC_ADDR_0_HIGH);
12861 lo = tr32(MAC_ADDR_0_LOW);
12862
12863 dev->dev_addr[5] = lo & 0xff;
12864 dev->dev_addr[4] = (lo >> 8) & 0xff;
12865 dev->dev_addr[3] = (lo >> 16) & 0xff;
12866 dev->dev_addr[2] = (lo >> 24) & 0xff;
12867 dev->dev_addr[1] = hi & 0xff;
12868 dev->dev_addr[0] = (hi >> 8) & 0xff;
12869 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012870 }
12871
12872 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012873#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012874 if (!tg3_get_default_macaddr_sparc(tp))
12875 return 0;
12876#endif
12877 return -EINVAL;
12878 }
John W. Linville2ff43692005-09-12 14:44:20 -070012879 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012880 return 0;
12881}
12882
David S. Miller59e6b432005-05-18 22:50:10 -070012883#define BOUNDARY_SINGLE_CACHELINE 1
12884#define BOUNDARY_MULTI_CACHELINE 2
12885
12886static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12887{
12888 int cacheline_size;
12889 u8 byte;
12890 int goal;
12891
12892 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12893 if (byte == 0)
12894 cacheline_size = 1024;
12895 else
12896 cacheline_size = (int) byte * 4;
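	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
	 * multiply by 4: e.g. a register value of 0x10 means a 64-byte
	 * cache line.  A value of 0 (not programmed) is treated as the
	 * worst case, 1024 bytes.
	 */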
12897
12898 /* On 5703 and later chips, the boundary bits have no
12899 * effect.
12900 */
12901 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12902 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12903 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12904 goto out;
12905
12906#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12907 goal = BOUNDARY_MULTI_CACHELINE;
12908#else
12909#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12910 goal = BOUNDARY_SINGLE_CACHELINE;
12911#else
12912 goal = 0;
12913#endif
12914#endif
12915
12916 if (!goal)
12917 goto out;
12918
12919 /* PCI controllers on most RISC systems tend to disconnect
12920 * when a device tries to burst across a cache-line boundary.
12921 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12922 *
12923 * Unfortunately, for PCI-E there are only limited
12924 * write-side controls for this, and thus for reads
12925 * we will still get the disconnects. We'll also waste
12926 * these PCI cycles for both read and write for chips
12927 * other than 5700 and 5701 which do not implement the
12928 * boundary bits.
12929 */
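	/* For example (plain PCI, 64-byte cache line, single-cacheline
	 * goal), the switch below selects DMA_RWCTRL_READ_BNDRY_64 |
	 * DMA_RWCTRL_WRITE_BNDRY_64, so DMA bursts are broken up at
	 * 64-byte boundaries.
	 */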
12930 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12931 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12932 switch (cacheline_size) {
12933 case 16:
12934 case 32:
12935 case 64:
12936 case 128:
12937 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12938 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12939 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12940 } else {
12941 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12942 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12943 }
12944 break;
12945
12946 case 256:
12947 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12948 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12949 break;
12950
12951 default:
12952 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12953 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12954 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012955 }
David S. Miller59e6b432005-05-18 22:50:10 -070012956 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12957 switch (cacheline_size) {
12958 case 16:
12959 case 32:
12960 case 64:
12961 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12962 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12963 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12964 break;
12965 }
12966 /* fallthrough */
12967 case 128:
12968 default:
12969 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12970 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12971 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012972 }
David S. Miller59e6b432005-05-18 22:50:10 -070012973 } else {
12974 switch (cacheline_size) {
12975 case 16:
12976 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12977 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12978 DMA_RWCTRL_WRITE_BNDRY_16);
12979 break;
12980 }
12981 /* fallthrough */
12982 case 32:
12983 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12984 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12985 DMA_RWCTRL_WRITE_BNDRY_32);
12986 break;
12987 }
12988 /* fallthrough */
12989 case 64:
12990 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12991 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12992 DMA_RWCTRL_WRITE_BNDRY_64);
12993 break;
12994 }
12995 /* fallthrough */
12996 case 128:
12997 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12998 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12999 DMA_RWCTRL_WRITE_BNDRY_128);
13000 break;
13001 }
13002 /* fallthrough */
13003 case 256:
13004 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13005 DMA_RWCTRL_WRITE_BNDRY_256);
13006 break;
13007 case 512:
13008 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13009 DMA_RWCTRL_WRITE_BNDRY_512);
13010 break;
13011 case 1024:
13012 default:
13013 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13014 DMA_RWCTRL_WRITE_BNDRY_1024);
13015 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070013016 }
David S. Miller59e6b432005-05-18 22:50:10 -070013017 }
13018
13019out:
13020 return val;
13021}
13022
Linus Torvalds1da177e2005-04-16 15:20:36 -070013023static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13024{
13025 struct tg3_internal_buffer_desc test_desc;
13026 u32 sram_dma_descs;
13027 int i, ret;
13028
13029 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13030
13031 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13032 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13033 tw32(RDMAC_STATUS, 0);
13034 tw32(WDMAC_STATUS, 0);
13035
13036 tw32(BUFMGR_MODE, 0);
13037 tw32(FTQ_RESET, 0);
13038
13039 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13040 test_desc.addr_lo = buf_dma & 0xffffffff;
13041 test_desc.nic_mbuf = 0x00002100;
13042 test_desc.len = size;
13043
13044 /*
	13045	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13046 * the *second* time the tg3 driver was getting loaded after an
13047 * initial scan.
13048 *
13049 * Broadcom tells me:
13050 * ...the DMA engine is connected to the GRC block and a DMA
13051 * reset may affect the GRC block in some unpredictable way...
13052 * The behavior of resets to individual blocks has not been tested.
13053 *
13054 * Broadcom noted the GRC reset will also reset all sub-components.
13055 */
13056 if (to_device) {
13057 test_desc.cqid_sqid = (13 << 8) | 2;
13058
13059 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13060 udelay(40);
13061 } else {
13062 test_desc.cqid_sqid = (16 << 8) | 7;
13063
13064 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13065 udelay(40);
13066 }
13067 test_desc.flags = 0x00000005;
13068
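	/* The descriptor is copied into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space window: MEM_WIN_BASE_ADDR selects
	 * the target address inside the chip and MEM_WIN_DATA carries the
	 * data.  The window base is cleared again once the copy is done.
	 */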
13069 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13070 u32 val;
13071
13072 val = *(((u32 *)&test_desc) + i);
13073 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13074 sram_dma_descs + (i * sizeof(u32)));
13075 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13076 }
13077 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13078
13079 if (to_device) {
13080 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13081 } else {
13082 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13083 }
13084
13085 ret = -ENODEV;
13086 for (i = 0; i < 40; i++) {
13087 u32 val;
13088
13089 if (to_device)
13090 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13091 else
13092 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13093 if ((val & 0xffff) == sram_dma_descs) {
13094 ret = 0;
13095 break;
13096 }
13097
13098 udelay(100);
13099 }
13100
13101 return ret;
13102}
13103
David S. Millerded73402005-05-23 13:59:47 -070013104#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070013105
13106static int __devinit tg3_test_dma(struct tg3 *tp)
13107{
13108 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070013109 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013110 int ret;
13111
13112 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13113 if (!buf) {
13114 ret = -ENOMEM;
13115 goto out_nofree;
13116 }
13117
13118 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13119 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13120
David S. Miller59e6b432005-05-18 22:50:10 -070013121 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013122
13123 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13124 /* DMA read watermark not used on PCIE */
13125 tp->dma_rwctrl |= 0x00180000;
13126 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070013127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013129 tp->dma_rwctrl |= 0x003f0000;
13130 else
13131 tp->dma_rwctrl |= 0x003f000f;
13132 } else {
13133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13135 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080013136 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013137
Michael Chan4a29cc22006-03-19 13:21:12 -080013138 /* If the 5704 is behind the EPB bridge, we can
13139 * do the less restrictive ONE_DMA workaround for
13140 * better performance.
13141 */
13142 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13144 tp->dma_rwctrl |= 0x8000;
13145 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013146 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13147
Michael Chan49afdeb2007-02-13 12:17:03 -080013148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13149 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070013150 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080013151 tp->dma_rwctrl |=
13152 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13153 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13154 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070013155 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13156 /* 5780 always in PCIX mode */
13157 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070013158 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13159 /* 5714 always in PCIX mode */
13160 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013161 } else {
13162 tp->dma_rwctrl |= 0x001b000f;
13163 }
13164 }
13165
13166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13168 tp->dma_rwctrl &= 0xfffffff0;
13169
13170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13172 /* Remove this if it causes problems for some boards. */
13173 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13174
13175 /* On 5700/5701 chips, we need to set this bit.
13176 * Otherwise the chip will issue cacheline transactions
13177 * to streamable DMA memory with not all the byte
13178 * enables turned on. This is an error on several
13179 * RISC PCI controllers, in particular sparc64.
13180 *
13181 * On 5703/5704 chips, this bit has been reassigned
13182 * a different meaning. In particular, it is used
13183 * on those chips to enable a PCI-X workaround.
13184 */
13185 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13186 }
13187
13188 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13189
13190#if 0
13191 /* Unneeded, already done by tg3_get_invariants. */
13192 tg3_switch_clocks(tp);
13193#endif
13194
13195 ret = 0;
13196 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13197 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13198 goto out;
13199
David S. Miller59e6b432005-05-18 22:50:10 -070013200 /* It is best to perform DMA test with maximum write burst size
13201 * to expose the 5700/5701 write DMA bug.
13202 */
13203 saved_dma_rwctrl = tp->dma_rwctrl;
13204 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13205 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13206
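	/* With the write-boundary field cleared the chip uses its maximum
	 * write burst size, which is what provokes the 5700/5701 write DMA
	 * bug.  The loop below writes a known pattern, reads it back, and
	 * on corruption retries with a 16-byte write boundary before
	 * giving up.
	 */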
Linus Torvalds1da177e2005-04-16 15:20:36 -070013207 while (1) {
13208 u32 *p = buf, i;
13209
13210 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13211 p[i] = i;
13212
13213 /* Send the buffer to the chip. */
13214 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13215 if (ret) {
	13216			printk(KERN_ERR "tg3_test_dma() Write of the test buffer failed, err = %d\n", ret);
13217 break;
13218 }
13219
13220#if 0
13221 /* validate data reached card RAM correctly. */
13222 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13223 u32 val;
13224 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13225 if (le32_to_cpu(val) != p[i]) {
13226 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13227 /* ret = -ENODEV here? */
13228 }
13229 p[i] = 0;
13230 }
13231#endif
13232 /* Now read it back. */
13233 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13234 if (ret) {
	13235			printk(KERN_ERR "tg3_test_dma() Read of the test buffer failed, err = %d\n", ret);
13236
13237 break;
13238 }
13239
13240 /* Verify it. */
13241 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13242 if (p[i] == i)
13243 continue;
13244
David S. Miller59e6b432005-05-18 22:50:10 -070013245 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13246 DMA_RWCTRL_WRITE_BNDRY_16) {
13247 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013248 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13249 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13250 break;
13251 } else {
13252 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13253 ret = -ENODEV;
13254 goto out;
13255 }
13256 }
13257
13258 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13259 /* Success. */
13260 ret = 0;
13261 break;
13262 }
13263 }
David S. Miller59e6b432005-05-18 22:50:10 -070013264 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13265 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013266 static struct pci_device_id dma_wait_state_chipsets[] = {
13267 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13268 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13269 { },
13270 };
13271
David S. Miller59e6b432005-05-18 22:50:10 -070013272		/* DMA test passed without adjusting DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013273 * now look for chipsets that are known to expose the
13274 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013275 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013276 if (pci_dev_present(dma_wait_state_chipsets)) {
13277 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13278 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13279 }
13280 else
13281 /* Safe to use the calculated DMA boundary. */
13282 tp->dma_rwctrl = saved_dma_rwctrl;
13283
David S. Miller59e6b432005-05-18 22:50:10 -070013284 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013286
13287out:
13288 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13289out_nofree:
13290 return ret;
13291}
13292
13293static void __devinit tg3_init_link_config(struct tg3 *tp)
13294{
13295 tp->link_config.advertising =
13296 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13297 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13298 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13299 ADVERTISED_Autoneg | ADVERTISED_MII);
13300 tp->link_config.speed = SPEED_INVALID;
13301 tp->link_config.duplex = DUPLEX_INVALID;
13302 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013303 tp->link_config.active_speed = SPEED_INVALID;
13304 tp->link_config.active_duplex = DUPLEX_INVALID;
13305 tp->link_config.phy_is_low_power = 0;
13306 tp->link_config.orig_speed = SPEED_INVALID;
13307 tp->link_config.orig_duplex = DUPLEX_INVALID;
13308 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13309}
13310
13311static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13312{
Michael Chanfdfec172005-07-25 12:31:48 -070013313 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13314 tp->bufmgr_config.mbuf_read_dma_low_water =
13315 DEFAULT_MB_RDMA_LOW_WATER_5705;
13316 tp->bufmgr_config.mbuf_mac_rx_low_water =
13317 DEFAULT_MB_MACRX_LOW_WATER_5705;
13318 tp->bufmgr_config.mbuf_high_water =
13319 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13321 tp->bufmgr_config.mbuf_mac_rx_low_water =
13322 DEFAULT_MB_MACRX_LOW_WATER_5906;
13323 tp->bufmgr_config.mbuf_high_water =
13324 DEFAULT_MB_HIGH_WATER_5906;
13325 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013326
Michael Chanfdfec172005-07-25 12:31:48 -070013327 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13328 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13329 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13330 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13331 tp->bufmgr_config.mbuf_high_water_jumbo =
13332 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13333 } else {
13334 tp->bufmgr_config.mbuf_read_dma_low_water =
13335 DEFAULT_MB_RDMA_LOW_WATER;
13336 tp->bufmgr_config.mbuf_mac_rx_low_water =
13337 DEFAULT_MB_MACRX_LOW_WATER;
13338 tp->bufmgr_config.mbuf_high_water =
13339 DEFAULT_MB_HIGH_WATER;
13340
13341 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13342 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13343 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13344 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13345 tp->bufmgr_config.mbuf_high_water_jumbo =
13346 DEFAULT_MB_HIGH_WATER_JUMBO;
13347 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013348
13349 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13350 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13351}
13352
13353static char * __devinit tg3_phy_string(struct tg3 *tp)
13354{
13355 switch (tp->phy_id & PHY_ID_MASK) {
13356 case PHY_ID_BCM5400: return "5400";
13357 case PHY_ID_BCM5401: return "5401";
13358 case PHY_ID_BCM5411: return "5411";
13359 case PHY_ID_BCM5701: return "5701";
13360 case PHY_ID_BCM5703: return "5703";
13361 case PHY_ID_BCM5704: return "5704";
13362 case PHY_ID_BCM5705: return "5705";
13363 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013364 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013365 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013366 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013367 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013368 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013369 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013370 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013371 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013372 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013373 case PHY_ID_BCM8002: return "8002/serdes";
13374 case 0: return "serdes";
13375 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013376 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013377}
13378
Michael Chanf9804dd2005-09-27 12:13:10 -070013379static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13380{
13381 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13382 strcpy(str, "PCI Express");
13383 return str;
13384 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13385 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13386
13387 strcpy(str, "PCIX:");
13388
13389 if ((clock_ctrl == 7) ||
13390 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13391 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13392 strcat(str, "133MHz");
13393 else if (clock_ctrl == 0)
13394 strcat(str, "33MHz");
13395 else if (clock_ctrl == 2)
13396 strcat(str, "50MHz");
13397 else if (clock_ctrl == 4)
13398 strcat(str, "66MHz");
13399 else if (clock_ctrl == 6)
13400 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013401 } else {
13402 strcpy(str, "PCI:");
13403 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13404 strcat(str, "66MHz");
13405 else
13406 strcat(str, "33MHz");
13407 }
13408 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13409 strcat(str, ":32-bit");
13410 else
13411 strcat(str, ":64-bit");
13412 return str;
13413}
13414
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013415static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013416{
13417 struct pci_dev *peer;
13418 unsigned int func, devnr = tp->pdev->devfn & ~7;
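	/* devfn packs the device number in bits 7:3 and the function in
	 * bits 2:0, so devnr is function 0 of our own slot; e.g. devfn
	 * 0x21 (device 4, function 1) makes the loop probe 0x20..0x27.
	 */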
13419
13420 for (func = 0; func < 8; func++) {
13421 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13422 if (peer && peer != tp->pdev)
13423 break;
13424 pci_dev_put(peer);
13425 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013426	/* 5704 can be configured in single-port mode; set peer to
13427 * tp->pdev in that case.
13428 */
13429 if (!peer) {
13430 peer = tp->pdev;
13431 return peer;
13432 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013433
13434 /*
13435 * We don't need to keep the refcount elevated; there's no way
13436 * to remove one half of this device without removing the other
13437 */
13438 pci_dev_put(peer);
13439
13440 return peer;
13441}
13442
David S. Miller15f98502005-05-18 22:49:26 -070013443static void __devinit tg3_init_coal(struct tg3 *tp)
13444{
13445 struct ethtool_coalesce *ec = &tp->coal;
13446
13447 memset(ec, 0, sizeof(*ec));
13448 ec->cmd = ETHTOOL_GCOALESCE;
13449 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13450 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13451 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13452 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13453 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13454 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13455 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13456 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13457 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13458
13459 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13460 HOSTCC_MODE_CLRTICK_TXBD)) {
13461 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13462 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13463 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13464 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13465 }
Michael Chand244c892005-07-05 14:42:33 -070013466
13467 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13468 ec->rx_coalesce_usecs_irq = 0;
13469 ec->tx_coalesce_usecs_irq = 0;
13470 ec->stats_block_coalesce_usecs = 0;
13471 }
David S. Miller15f98502005-05-18 22:49:26 -070013472}
13473
Stephen Hemminger7c7d64b2008-11-19 22:25:36 -080013474static const struct net_device_ops tg3_netdev_ops = {
13475 .ndo_open = tg3_open,
13476 .ndo_stop = tg3_close,
Stephen Hemminger00829822008-11-20 20:14:53 -080013477 .ndo_start_xmit = tg3_start_xmit,
13478 .ndo_get_stats = tg3_get_stats,
13479 .ndo_validate_addr = eth_validate_addr,
13480 .ndo_set_multicast_list = tg3_set_rx_mode,
13481 .ndo_set_mac_address = tg3_set_mac_addr,
13482 .ndo_do_ioctl = tg3_ioctl,
13483 .ndo_tx_timeout = tg3_tx_timeout,
13484 .ndo_change_mtu = tg3_change_mtu,
13485#if TG3_VLAN_TAG_USED
13486 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13487#endif
13488#ifdef CONFIG_NET_POLL_CONTROLLER
13489 .ndo_poll_controller = tg3_poll_controller,
13490#endif
13491};
13492
13493static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13494 .ndo_open = tg3_open,
13495 .ndo_stop = tg3_close,
13496 .ndo_start_xmit = tg3_start_xmit_dma_bug,
Stephen Hemminger7c7d64b2008-11-19 22:25:36 -080013497 .ndo_get_stats = tg3_get_stats,
13498 .ndo_validate_addr = eth_validate_addr,
13499 .ndo_set_multicast_list = tg3_set_rx_mode,
13500 .ndo_set_mac_address = tg3_set_mac_addr,
13501 .ndo_do_ioctl = tg3_ioctl,
13502 .ndo_tx_timeout = tg3_tx_timeout,
13503 .ndo_change_mtu = tg3_change_mtu,
13504#if TG3_VLAN_TAG_USED
13505 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13506#endif
13507#ifdef CONFIG_NET_POLL_CONTROLLER
13508 .ndo_poll_controller = tg3_poll_controller,
13509#endif
13510};
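/* The two netdev_ops tables above differ only in .ndo_start_xmit:
 * the _dma_bug variant routes transmits through tg3_start_xmit_dma_bug,
 * which carries extra workarounds for DMA errata on older chips.
 * tg3_init_one() below picks one of them based on the ASIC revision.
 */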
13511
Linus Torvalds1da177e2005-04-16 15:20:36 -070013512static int __devinit tg3_init_one(struct pci_dev *pdev,
13513 const struct pci_device_id *ent)
13514{
13515 static int tg3_version_printed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013516 struct net_device *dev;
13517 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013518 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013519 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013520 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013521
13522 if (tg3_version_printed++ == 0)
13523 printk(KERN_INFO "%s", version);
13524
13525 err = pci_enable_device(pdev);
13526 if (err) {
13527 printk(KERN_ERR PFX "Cannot enable PCI device, "
13528 "aborting.\n");
13529 return err;
13530 }
13531
Linus Torvalds1da177e2005-04-16 15:20:36 -070013532 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13533 if (err) {
13534 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13535 "aborting.\n");
13536 goto err_out_disable_pdev;
13537 }
13538
13539 pci_set_master(pdev);
13540
13541 /* Find power-management capability. */
13542 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13543 if (pm_cap == 0) {
13544 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13545 "aborting.\n");
13546 err = -EIO;
13547 goto err_out_free_res;
13548 }
13549
Linus Torvalds1da177e2005-04-16 15:20:36 -070013550 dev = alloc_etherdev(sizeof(*tp));
13551 if (!dev) {
13552 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13553 err = -ENOMEM;
13554 goto err_out_free_res;
13555 }
13556
Linus Torvalds1da177e2005-04-16 15:20:36 -070013557 SET_NETDEV_DEV(dev, &pdev->dev);
13558
Linus Torvalds1da177e2005-04-16 15:20:36 -070013559#if TG3_VLAN_TAG_USED
13560 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013561#endif
13562
13563 tp = netdev_priv(dev);
13564 tp->pdev = pdev;
13565 tp->dev = dev;
13566 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013567 tp->rx_mode = TG3_DEF_RX_MODE;
13568 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013569
Linus Torvalds1da177e2005-04-16 15:20:36 -070013570 if (tg3_debug > 0)
13571 tp->msg_enable = tg3_debug;
13572 else
13573 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13574
13575 /* The word/byte swap controls here control register access byte
13576 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13577 * setting below.
13578 */
13579 tp->misc_host_ctrl =
13580 MISC_HOST_CTRL_MASK_PCI_INT |
13581 MISC_HOST_CTRL_WORD_SWAP |
13582 MISC_HOST_CTRL_INDIR_ACCESS |
13583 MISC_HOST_CTRL_PCISTATE_RW;
13584
13585 /* The NONFRM (non-frame) byte/word swap controls take effect
13586 * on descriptor entries, anything which isn't packet data.
13587 *
13588 * The StrongARM chips on the board (one for tx, one for rx)
13589 * are running in big-endian mode.
13590 */
13591 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13592 GRC_MODE_WSWAP_NONFRM_DATA);
13593#ifdef __BIG_ENDIAN
13594 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13595#endif
13596 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013597 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013598 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013599
Matt Carlsond5fe4882008-11-21 17:20:32 -080013600 tp->regs = pci_ioremap_bar(pdev, BAR_0);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013601 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013602 printk(KERN_ERR PFX "Cannot map device registers, "
13603 "aborting.\n");
13604 err = -ENOMEM;
13605 goto err_out_free_dev;
13606 }
13607
13608 tg3_init_link_config(tp);
13609
Linus Torvalds1da177e2005-04-16 15:20:36 -070013610 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13611 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13612 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13613
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013614 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013615 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013616 dev->watchdog_timeo = TG3_TX_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013617 dev->irq = pdev->irq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013618
13619 err = tg3_get_invariants(tp);
13620 if (err) {
13621 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13622 "aborting.\n");
13623 goto err_out_iounmap;
13624 }
13625
Stephen Hemminger00829822008-11-20 20:14:53 -080013626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13632 dev->netdev_ops = &tg3_netdev_ops;
13633 else
13634 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13635
13636
Michael Chan4a29cc22006-03-19 13:21:12 -080013637 /* The EPB bridge inside 5714, 5715, and 5780 and any
13638 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013639 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13640 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13641 * do DMA address check in tg3_start_xmit().
13642 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013643 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13644 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13645 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013646 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13647#ifdef CONFIG_HIGHMEM
13648 dma_mask = DMA_64BIT_MASK;
13649#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013650 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013651 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13652
13653 /* Configure DMA attributes. */
13654 if (dma_mask > DMA_32BIT_MASK) {
13655 err = pci_set_dma_mask(pdev, dma_mask);
13656 if (!err) {
13657 dev->features |= NETIF_F_HIGHDMA;
13658 err = pci_set_consistent_dma_mask(pdev,
13659 persist_dma_mask);
13660 if (err < 0) {
13661 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13662 "DMA for consistent allocations\n");
13663 goto err_out_iounmap;
13664 }
13665 }
13666 }
13667 if (err || dma_mask == DMA_32BIT_MASK) {
13668 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13669 if (err) {
13670 printk(KERN_ERR PFX "No usable DMA configuration, "
13671 "aborting.\n");
13672 goto err_out_iounmap;
13673 }
13674 }
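	/* Worked example of the masks chosen above (illustrative): a device
	 * with the 40-bit DMA bug on a CONFIG_HIGHMEM kernel ends up with a
	 * 64-bit streaming mask but a 40-bit consistent mask, so the
	 * transmit path still has to check each DMA mapping against the
	 * 40-bit limit, as noted in the comment above.
	 */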
13675
Michael Chanfdfec172005-07-25 12:31:48 -070013676 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013677
Linus Torvalds1da177e2005-04-16 15:20:36 -070013678 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13679 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	13680	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13683 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013685 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13686 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13687 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013688 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013689 }
13690
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013691 /* TSO is on by default on chips that support hardware TSO.
13692 * Firmware TSO on older chips gives lower performance, so it
13693 * is off by default, but can be enabled using ethtool.
13694 */
Michael Chanb0026622006-07-03 19:42:14 -070013695 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013696 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013697 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13698 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013699 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13701 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13702 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013704 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013705 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013706
Linus Torvalds1da177e2005-04-16 15:20:36 -070013707
13708 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13709 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13710 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13711 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13712 tp->rx_pending = 63;
13713 }
13714
Linus Torvalds1da177e2005-04-16 15:20:36 -070013715 err = tg3_get_device_address(tp);
13716 if (err) {
13717 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13718 "aborting.\n");
13719 goto err_out_iounmap;
13720 }
13721
Matt Carlson0d3031d2007-10-10 18:02:43 -070013722 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013723 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013724 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013725 printk(KERN_ERR PFX "Cannot map APE registers, "
13726 "aborting.\n");
13727 err = -ENOMEM;
13728 goto err_out_iounmap;
13729 }
13730
13731 tg3_ape_lock_init(tp);
13732 }
13733
Matt Carlsonc88864d2007-11-12 21:07:01 -080013734 /*
	13735	 * Reset the chip in case the UNDI or EFI driver did not shut down
	13736	 * cleanly; otherwise the DMA self test will enable WDMAC and we'll
	13737	 * see (spurious) pending DMA on the PCI bus at that point.
13738 */
13739 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13740 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13741 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13742 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13743 }
13744
13745 err = tg3_test_dma(tp);
13746 if (err) {
13747 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13748 goto err_out_apeunmap;
13749 }
13750
13751 /* Tigon3 can do ipv4 only... and some chips have buggy
13752 * checksumming.
13753 */
13754 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13755 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13756 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13758 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013759 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013761 dev->features |= NETIF_F_IPV6_CSUM;
13762
13763 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13764 } else
13765 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13766
13767 /* flow control autonegotiation is default behavior */
13768 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013769 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013770
13771 tg3_init_coal(tp);
13772
Michael Chanc49a1562006-12-17 17:07:29 -080013773 pci_set_drvdata(pdev, dev);
13774
Linus Torvalds1da177e2005-04-16 15:20:36 -070013775 err = register_netdev(dev);
13776 if (err) {
13777 printk(KERN_ERR PFX "Cannot register net device, "
13778 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013779 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013780 }
13781
Matt Carlsondf59c942008-11-03 16:52:56 -080013782 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013783 dev->name,
13784 tp->board_part_number,
13785 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013786 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013787 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013788
Matt Carlsondf59c942008-11-03 16:52:56 -080013789 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13790 printk(KERN_INFO
13791 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13792 tp->dev->name,
13793 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
Kay Sieversfb28ad32008-11-10 13:55:14 -080013794 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
Matt Carlsondf59c942008-11-03 16:52:56 -080013795 else
13796 printk(KERN_INFO
13797 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13798 tp->dev->name, tg3_phy_string(tp),
13799 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13800 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13801 "10/100/1000Base-T")),
13802 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13803
13804 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013805 dev->name,
13806 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13807 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13808 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13809 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013810 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013811 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13812 dev->name, tp->dma_rwctrl,
13813 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13814 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013815
13816 return 0;
13817
Matt Carlson0d3031d2007-10-10 18:02:43 -070013818err_out_apeunmap:
13819 if (tp->aperegs) {
13820 iounmap(tp->aperegs);
13821 tp->aperegs = NULL;
13822 }
13823
Linus Torvalds1da177e2005-04-16 15:20:36 -070013824err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013825 if (tp->regs) {
13826 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013827 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013829
13830err_out_free_dev:
13831 free_netdev(dev);
13832
13833err_out_free_res:
13834 pci_release_regions(pdev);
13835
13836err_out_disable_pdev:
13837 pci_disable_device(pdev);
13838 pci_set_drvdata(pdev, NULL);
13839 return err;
13840}
13841
13842static void __devexit tg3_remove_one(struct pci_dev *pdev)
13843{
13844 struct net_device *dev = pci_get_drvdata(pdev);
13845
13846 if (dev) {
13847 struct tg3 *tp = netdev_priv(dev);
13848
Michael Chan7faa0062006-02-02 17:29:28 -080013849 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013850
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013851 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13852 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013853 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013854 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013855
Linus Torvalds1da177e2005-04-16 15:20:36 -070013856 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013857 if (tp->aperegs) {
13858 iounmap(tp->aperegs);
13859 tp->aperegs = NULL;
13860 }
Michael Chan68929142005-08-09 20:17:14 -070013861 if (tp->regs) {
13862 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013863 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013864 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013865 free_netdev(dev);
13866 pci_release_regions(pdev);
13867 pci_disable_device(pdev);
13868 pci_set_drvdata(pdev, NULL);
13869 }
13870}
13871
13872static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13873{
13874 struct net_device *dev = pci_get_drvdata(pdev);
13875 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013876 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013877 int err;
13878
Michael Chan3e0c95f2007-08-03 20:56:54 -070013879 /* PCI register 4 needs to be saved whether netif_running() or not.
13880 * MSI address and data need to be saved if using MSI and
13881 * netif_running().
13882 */
13883 pci_save_state(pdev);
13884
Linus Torvalds1da177e2005-04-16 15:20:36 -070013885 if (!netif_running(dev))
13886 return 0;
13887
Michael Chan7faa0062006-02-02 17:29:28 -080013888 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013889 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013890 tg3_netif_stop(tp);
13891
13892 del_timer_sync(&tp->timer);
13893
David S. Millerf47c11e2005-06-24 20:18:35 -070013894 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013895 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013896 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013897
13898 netif_device_detach(dev);
13899
David S. Millerf47c11e2005-06-24 20:18:35 -070013900 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013901 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013902 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013903 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013904
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013905 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13906
13907 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013908 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013909 int err2;
13910
David S. Millerf47c11e2005-06-24 20:18:35 -070013911 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013912
Michael Chan6a9eba12005-12-13 21:08:58 -080013913 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013914 err2 = tg3_restart_hw(tp, 1);
13915 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013916 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013917
13918 tp->timer.expires = jiffies + tp->timer_offset;
13919 add_timer(&tp->timer);
13920
13921 netif_device_attach(dev);
13922 tg3_netif_start(tp);
13923
Michael Chanb9ec6c12006-07-25 16:37:27 -070013924out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013925 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013926
13927 if (!err2)
13928 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013929 }
13930
13931 return err;
13932}
13933
13934static int tg3_resume(struct pci_dev *pdev)
13935{
13936 struct net_device *dev = pci_get_drvdata(pdev);
13937 struct tg3 *tp = netdev_priv(dev);
13938 int err;
13939
Michael Chan3e0c95f2007-08-03 20:56:54 -070013940 pci_restore_state(tp->pdev);
13941
Linus Torvalds1da177e2005-04-16 15:20:36 -070013942 if (!netif_running(dev))
13943 return 0;
13944
Michael Chanbc1c7562006-03-20 17:48:03 -080013945 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013946 if (err)
13947 return err;
13948
13949 netif_device_attach(dev);
13950
David S. Millerf47c11e2005-06-24 20:18:35 -070013951 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013952
Michael Chan6a9eba12005-12-13 21:08:58 -080013953 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013954 err = tg3_restart_hw(tp, 1);
13955 if (err)
13956 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013957
13958 tp->timer.expires = jiffies + tp->timer_offset;
13959 add_timer(&tp->timer);
13960
Linus Torvalds1da177e2005-04-16 15:20:36 -070013961 tg3_netif_start(tp);
13962
Michael Chanb9ec6c12006-07-25 16:37:27 -070013963out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013964 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013965
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013966 if (!err)
13967 tg3_phy_start(tp);
13968
Michael Chanb9ec6c12006-07-25 16:37:27 -070013969 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013970}
13971
13972static struct pci_driver tg3_driver = {
13973 .name = DRV_MODULE_NAME,
13974 .id_table = tg3_pci_tbl,
13975 .probe = tg3_init_one,
13976 .remove = __devexit_p(tg3_remove_one),
13977 .suspend = tg3_suspend,
13978 .resume = tg3_resume
13979};
13980
13981static int __init tg3_init(void)
13982{
Jeff Garzik29917622006-08-19 17:48:59 -040013983 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013984}
13985
13986static void __exit tg3_cleanup(void)
13987{
13988 pci_unregister_driver(&tg3_driver);
13989}
13990
13991module_init(tg3_init);
13992module_exit(tg3_cleanup);