blob: c48bb51fb742651c06ce5621fa756e0aa5e59b87 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
Michael Chan65610fb2007-02-13 12:18:46 -08007 * Copyright (C) 2005-2007 Broadcom Corporation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020026#include <linux/in.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
Matt Carlson158d7ab2008-05-29 01:37:54 -070035#include <linux/phy.h>
Matt Carlsona9daf362008-05-25 23:49:44 -070036#include <linux/brcmphy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/if_vlan.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/workqueue.h>
Michael Chan61487482005-09-05 17:53:19 -070041#include <linux/prefetch.h>
Tobias Klauserf9a5f7d2005-10-29 15:09:26 +020042#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/checksum.h>
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -030045#include <net/ip.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#include <asm/system.h>
48#include <asm/io.h>
49#include <asm/byteorder.h>
50#include <asm/uaccess.h>
51
David S. Miller49b6e95f2007-03-29 01:38:42 -070052#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/idprom.h>
David S. Miller49b6e95f2007-03-29 01:38:42 -070054#include <asm/prom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#endif
56
Matt Carlson63532392008-11-03 16:49:57 -080057#define BAR_0 0
58#define BAR_2 2
59
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61#define TG3_VLAN_TAG_USED 1
62#else
63#define TG3_VLAN_TAG_USED 0
64#endif
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#define TG3_TSO_SUPPORT 1
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
Matt Carlson23197912008-08-15 14:11:19 -070072#define DRV_MODULE_VERSION "3.94"
73#define DRV_MODULE_RELDATE "August 14, 2008"
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
Michael Chan0f893dc2005-07-25 12:30:38 -070096 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131
132/* minimum number of free TX descriptors required to wake up TX process */
Ranjit Manomohan42952232006-10-18 20:54:26 -0700133#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
135/* number of ETHTOOL_GSTATS u64's */
136#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
Michael Chan4cafd3f2005-05-29 14:56:34 -0700138#define TG3_NUM_TEST 6
139
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140static char version[] __devinitdata =
141 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145MODULE_LICENSE("GPL");
146MODULE_VERSION(DRV_MODULE_VERSION);
147
148static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
149module_param(tg3_debug, int, 0);
150MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152static struct pci_device_id tg3_pci_tbl[] = {
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
Michael Chan126a3362006-09-27 16:03:07 -0700177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
Michael Chan126a3362006-09-27 16:03:07 -0700192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
Michael Chan676917d2006-12-07 00:20:22 -0800196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
Michael Chanb5d37722006-09-27 16:06:21 -0700204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
Matt Carlsond30cdd22007-10-07 23:28:35 -0700206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
Matt Carlson6c7af272007-10-21 16:12:02 -0700208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
Matt Carlson9936bcf2007-10-10 18:03:07 -0700209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
Matt Carlsonc88e6682008-11-03 16:49:18 -0800211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
Matt Carlson57e69832008-05-25 23:48:31 -0700213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700214 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221 {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222};
223
224MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
Andreas Mohr50da8592006-08-14 23:54:30 -0700226static const struct {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 const char string[ETH_GSTRING_LEN];
228} ethtool_stats_keys[TG3_NUM_STATS] = {
229 { "rx_octets" },
230 { "rx_fragments" },
231 { "rx_ucast_packets" },
232 { "rx_mcast_packets" },
233 { "rx_bcast_packets" },
234 { "rx_fcs_errors" },
235 { "rx_align_errors" },
236 { "rx_xon_pause_rcvd" },
237 { "rx_xoff_pause_rcvd" },
238 { "rx_mac_ctrl_rcvd" },
239 { "rx_xoff_entered" },
240 { "rx_frame_too_long_errors" },
241 { "rx_jabbers" },
242 { "rx_undersize_packets" },
243 { "rx_in_length_errors" },
244 { "rx_out_length_errors" },
245 { "rx_64_or_less_octet_packets" },
246 { "rx_65_to_127_octet_packets" },
247 { "rx_128_to_255_octet_packets" },
248 { "rx_256_to_511_octet_packets" },
249 { "rx_512_to_1023_octet_packets" },
250 { "rx_1024_to_1522_octet_packets" },
251 { "rx_1523_to_2047_octet_packets" },
252 { "rx_2048_to_4095_octet_packets" },
253 { "rx_4096_to_8191_octet_packets" },
254 { "rx_8192_to_9022_octet_packets" },
255
256 { "tx_octets" },
257 { "tx_collisions" },
258
259 { "tx_xon_sent" },
260 { "tx_xoff_sent" },
261 { "tx_flow_control" },
262 { "tx_mac_errors" },
263 { "tx_single_collisions" },
264 { "tx_mult_collisions" },
265 { "tx_deferred" },
266 { "tx_excessive_collisions" },
267 { "tx_late_collisions" },
268 { "tx_collide_2times" },
269 { "tx_collide_3times" },
270 { "tx_collide_4times" },
271 { "tx_collide_5times" },
272 { "tx_collide_6times" },
273 { "tx_collide_7times" },
274 { "tx_collide_8times" },
275 { "tx_collide_9times" },
276 { "tx_collide_10times" },
277 { "tx_collide_11times" },
278 { "tx_collide_12times" },
279 { "tx_collide_13times" },
280 { "tx_collide_14times" },
281 { "tx_collide_15times" },
282 { "tx_ucast_packets" },
283 { "tx_mcast_packets" },
284 { "tx_bcast_packets" },
285 { "tx_carrier_sense_errors" },
286 { "tx_discards" },
287 { "tx_errors" },
288
289 { "dma_writeq_full" },
290 { "dma_write_prioq_full" },
291 { "rxbds_empty" },
292 { "rx_discards" },
293 { "rx_errors" },
294 { "rx_threshold_hit" },
295
296 { "dma_readq_full" },
297 { "dma_read_prioq_full" },
298 { "tx_comp_queue_full" },
299
300 { "ring_set_send_prod_index" },
301 { "ring_status_update" },
302 { "nic_irqs" },
303 { "nic_avoided_irqs" },
304 { "nic_tx_threshold_hit" }
305};
306
Andreas Mohr50da8592006-08-14 23:54:30 -0700307static const struct {
Michael Chan4cafd3f2005-05-29 14:56:34 -0700308 const char string[ETH_GSTRING_LEN];
309} ethtool_test_keys[TG3_NUM_TEST] = {
310 { "nvram test (online) " },
311 { "link test (online) " },
312 { "register test (offline)" },
313 { "memory test (offline)" },
314 { "loopback test (offline)" },
315 { "interrupt test (offline)" },
316};
317
Michael Chanb401e9e2005-12-19 16:27:04 -0800318static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
319{
320 writel(val, tp->regs + off);
321}
322
323static u32 tg3_read32(struct tg3 *tp, u32 off)
324{
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400325 return (readl(tp->regs + off));
Michael Chanb401e9e2005-12-19 16:27:04 -0800326}
327
Matt Carlson0d3031d2007-10-10 18:02:43 -0700328static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
329{
330 writel(val, tp->aperegs + off);
331}
332
333static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334{
335 return (readl(tp->aperegs + off));
336}
337
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339{
Michael Chan68929142005-08-09 20:17:14 -0700340 unsigned long flags;
341
342 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700343 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
Michael Chan68929142005-08-09 20:17:14 -0700345 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700346}
347
348static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
349{
350 writel(val, tp->regs + off);
351 readl(tp->regs + off);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352}
353
Michael Chan68929142005-08-09 20:17:14 -0700354static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
355{
356 unsigned long flags;
357 u32 val;
358
359 spin_lock_irqsave(&tp->indirect_lock, flags);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
362 spin_unlock_irqrestore(&tp->indirect_lock, flags);
363 return val;
364}
365
366static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
367{
368 unsigned long flags;
369
370 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
371 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
372 TG3_64BIT_REG_LOW, val);
373 return;
374 }
375 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
376 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
377 TG3_64BIT_REG_LOW, val);
378 return;
379 }
380
381 spin_lock_irqsave(&tp->indirect_lock, flags);
382 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
384 spin_unlock_irqrestore(&tp->indirect_lock, flags);
385
386 /* In indirect mode when disabling interrupts, we also need
387 * to clear the interrupt bit in the GRC local ctrl register.
388 */
389 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
390 (val == 0x1)) {
391 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
392 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
393 }
394}
395
396static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
397{
398 unsigned long flags;
399 u32 val;
400
401 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
403 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
404 spin_unlock_irqrestore(&tp->indirect_lock, flags);
405 return val;
406}
407
Michael Chanb401e9e2005-12-19 16:27:04 -0800408/* usec_wait specifies the wait time in usec when writing to certain registers
409 * where it is unsafe to read back the register without some delay.
410 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
411 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
412 */
413static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414{
Michael Chanb401e9e2005-12-19 16:27:04 -0800415 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
416 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
417 /* Non-posted methods */
418 tp->write32(tp, off, val);
419 else {
420 /* Posted method */
421 tg3_write32(tp, off, val);
422 if (usec_wait)
423 udelay(usec_wait);
424 tp->read32(tp, off);
425 }
426 /* Wait again after the read for the posted method to guarantee that
427 * the wait time is met.
428 */
429 if (usec_wait)
430 udelay(usec_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431}
432
Michael Chan09ee9292005-08-09 20:17:00 -0700433static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
434{
435 tp->write32_mbox(tp, off, val);
Michael Chan68929142005-08-09 20:17:14 -0700436 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
437 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438 tp->read32_mbox(tp, off);
Michael Chan09ee9292005-08-09 20:17:00 -0700439}
440
Michael Chan20094932005-08-09 20:16:32 -0700441static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442{
443 void __iomem *mbox = tp->regs + off;
444 writel(val, mbox);
445 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
446 writel(val, mbox);
447 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
448 readl(mbox);
449}
450
Michael Chanb5d37722006-09-27 16:06:21 -0700451static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452{
453 return (readl(tp->regs + off + GRCMBOX_BASE));
454}
455
456static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
457{
458 writel(val, tp->regs + off + GRCMBOX_BASE);
459}
460
Michael Chan20094932005-08-09 20:16:32 -0700461#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700462#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
Michael Chan20094932005-08-09 20:16:32 -0700463#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
464#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700465#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
Michael Chan20094932005-08-09 20:16:32 -0700466
467#define tw32(reg,val) tp->write32(tp, reg, val)
Michael Chanb401e9e2005-12-19 16:27:04 -0800468#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
469#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
Michael Chan20094932005-08-09 20:16:32 -0700470#define tr32(reg) tp->read32(tp, reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
472static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
473{
Michael Chan68929142005-08-09 20:17:14 -0700474 unsigned long flags;
475
Michael Chanb5d37722006-09-27 16:06:21 -0700476 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
477 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
478 return;
479
Michael Chan68929142005-08-09 20:17:14 -0700480 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chanbbadf502006-04-06 21:46:34 -0700481 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
482 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
483 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484
Michael Chanbbadf502006-04-06 21:46:34 -0700485 /* Always leave this as zero. */
486 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
487 } else {
488 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
489 tw32_f(TG3PCI_MEM_WIN_DATA, val);
490
491 /* Always leave this as zero. */
492 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
493 }
Michael Chan68929142005-08-09 20:17:14 -0700494 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495}
496
497static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
498{
Michael Chan68929142005-08-09 20:17:14 -0700499 unsigned long flags;
500
Michael Chanb5d37722006-09-27 16:06:21 -0700501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
502 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
503 *val = 0;
504 return;
505 }
506
Michael Chan68929142005-08-09 20:17:14 -0700507 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chanbbadf502006-04-06 21:46:34 -0700508 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
509 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
510 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511
Michael Chanbbadf502006-04-06 21:46:34 -0700512 /* Always leave this as zero. */
513 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
514 } else {
515 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
516 *val = tr32(TG3PCI_MEM_WIN_DATA);
517
518 /* Always leave this as zero. */
519 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
520 }
Michael Chan68929142005-08-09 20:17:14 -0700521 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522}
523
Matt Carlson0d3031d2007-10-10 18:02:43 -0700524static void tg3_ape_lock_init(struct tg3 *tp)
525{
526 int i;
527
528 /* Make sure the driver hasn't any stale locks. */
529 for (i = 0; i < 8; i++)
530 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531 APE_LOCK_GRANT_DRIVER);
532}
533
534static int tg3_ape_lock(struct tg3 *tp, int locknum)
535{
536 int i, off;
537 int ret = 0;
538 u32 status;
539
540 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541 return 0;
542
543 switch (locknum) {
Matt Carlson77b483f2008-08-15 14:07:24 -0700544 case TG3_APE_LOCK_GRC:
Matt Carlson0d3031d2007-10-10 18:02:43 -0700545 case TG3_APE_LOCK_MEM:
546 break;
547 default:
548 return -EINVAL;
549 }
550
551 off = 4 * locknum;
552
553 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555 /* Wait for up to 1 millisecond to acquire lock. */
556 for (i = 0; i < 100; i++) {
557 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558 if (status == APE_LOCK_GRANT_DRIVER)
559 break;
560 udelay(10);
561 }
562
563 if (status != APE_LOCK_GRANT_DRIVER) {
564 /* Revoke the lock request. */
565 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566 APE_LOCK_GRANT_DRIVER);
567
568 ret = -EBUSY;
569 }
570
571 return ret;
572}
573
574static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575{
576 int off;
577
578 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579 return;
580
581 switch (locknum) {
Matt Carlson77b483f2008-08-15 14:07:24 -0700582 case TG3_APE_LOCK_GRC:
Matt Carlson0d3031d2007-10-10 18:02:43 -0700583 case TG3_APE_LOCK_MEM:
584 break;
585 default:
586 return;
587 }
588
589 off = 4 * locknum;
590 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591}
592
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593static void tg3_disable_ints(struct tg3 *tp)
594{
595 tw32(TG3PCI_MISC_HOST_CTRL,
596 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700597 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598}
599
600static inline void tg3_cond_int(struct tg3 *tp)
601{
Michael Chan38f38432005-09-05 17:53:32 -0700602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603 (tp->hw_status->status & SD_STATUS_UPDATED))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
Michael Chanb5d37722006-09-27 16:06:21 -0700605 else
606 tw32(HOSTCC_MODE, tp->coalesce_mode |
607 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608}
609
610static void tg3_enable_ints(struct tg3 *tp)
611{
Michael Chanbbe832c2005-06-24 20:20:04 -0700612 tp->irq_sync = 0;
613 wmb();
614
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 tw32(TG3PCI_MISC_HOST_CTRL,
616 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700617 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
618 (tp->last_tag << 24));
Michael Chanfcfa0a32006-03-20 22:28:41 -0800619 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
620 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
621 (tp->last_tag << 24));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 tg3_cond_int(tp);
623}
624
Michael Chan04237dd2005-04-25 15:17:17 -0700625static inline unsigned int tg3_has_work(struct tg3 *tp)
626{
627 struct tg3_hw_status *sblk = tp->hw_status;
628 unsigned int work_exists = 0;
629
630 /* check for phy events */
631 if (!(tp->tg3_flags &
632 (TG3_FLAG_USE_LINKCHG_REG |
633 TG3_FLAG_POLL_SERDES))) {
634 if (sblk->status & SD_STATUS_LINK_CHG)
635 work_exists = 1;
636 }
637 /* check for RX/TX work to do */
638 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
640 work_exists = 1;
641
642 return work_exists;
643}
644
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645/* tg3_restart_ints
Michael Chan04237dd2005-04-25 15:17:17 -0700646 * similar to tg3_enable_ints, but it accurately determines whether there
647 * is new work pending and can return without flushing the PIO write
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400648 * which reenables interrupts
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 */
650static void tg3_restart_ints(struct tg3 *tp)
651{
David S. Millerfac9b832005-05-18 22:46:34 -0700652 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
653 tp->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 mmiowb();
655
David S. Millerfac9b832005-05-18 22:46:34 -0700656 /* When doing tagged status, this work check is unnecessary.
657 * The last_tag we write above tells the chip which piece of
658 * work we've completed.
659 */
660 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
661 tg3_has_work(tp))
Michael Chan04237dd2005-04-25 15:17:17 -0700662 tw32(HOSTCC_MODE, tp->coalesce_mode |
663 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664}
665
666static inline void tg3_netif_stop(struct tg3 *tp)
667{
Michael Chanbbe832c2005-06-24 20:20:04 -0700668 tp->dev->trans_start = jiffies; /* prevent tx timeout */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700669 napi_disable(&tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 netif_tx_disable(tp->dev);
671}
672
673static inline void tg3_netif_start(struct tg3 *tp)
674{
675 netif_wake_queue(tp->dev);
676 /* NOTE: unconditional netif_wake_queue is only appropriate
677 * so long as all callers are assured to have free tx slots
678 * (such as after tg3_init_hw)
679 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700680 napi_enable(&tp->napi);
David S. Millerf47c11e2005-06-24 20:18:35 -0700681 tp->hw_status->status |= SD_STATUS_UPDATED;
682 tg3_enable_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683}
684
685static void tg3_switch_clocks(struct tg3 *tp)
686{
687 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
688 u32 orig_clock_ctrl;
689
Matt Carlson795d01c2007-10-07 23:28:17 -0700690 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
691 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan4cf78e42005-07-25 12:29:19 -0700692 return;
693
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 orig_clock_ctrl = clock_ctrl;
695 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
696 CLOCK_CTRL_CLKRUN_OENABLE |
697 0x1f);
698 tp->pci_clock_ctrl = clock_ctrl;
699
700 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
701 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
Michael Chanb401e9e2005-12-19 16:27:04 -0800702 tw32_wait_f(TG3PCI_CLOCK_CTRL,
703 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704 }
705 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
Michael Chanb401e9e2005-12-19 16:27:04 -0800706 tw32_wait_f(TG3PCI_CLOCK_CTRL,
707 clock_ctrl |
708 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
709 40);
710 tw32_wait_f(TG3PCI_CLOCK_CTRL,
711 clock_ctrl | (CLOCK_CTRL_ALTCLK),
712 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 }
Michael Chanb401e9e2005-12-19 16:27:04 -0800714 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715}
716
717#define PHY_BUSY_LOOPS 5000
718
719static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
720{
721 u32 frame_val;
722 unsigned int loops;
723 int ret;
724
725 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
726 tw32_f(MAC_MI_MODE,
727 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
728 udelay(80);
729 }
730
731 *val = 0x0;
732
733 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
734 MI_COM_PHY_ADDR_MASK);
735 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
736 MI_COM_REG_ADDR_MASK);
737 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400738
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 tw32_f(MAC_MI_COM, frame_val);
740
741 loops = PHY_BUSY_LOOPS;
742 while (loops != 0) {
743 udelay(10);
744 frame_val = tr32(MAC_MI_COM);
745
746 if ((frame_val & MI_COM_BUSY) == 0) {
747 udelay(5);
748 frame_val = tr32(MAC_MI_COM);
749 break;
750 }
751 loops -= 1;
752 }
753
754 ret = -EBUSY;
755 if (loops != 0) {
756 *val = frame_val & MI_COM_DATA_MASK;
757 ret = 0;
758 }
759
760 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
761 tw32_f(MAC_MI_MODE, tp->mi_mode);
762 udelay(80);
763 }
764
765 return ret;
766}
767
768static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
769{
770 u32 frame_val;
771 unsigned int loops;
772 int ret;
773
Michael Chanb5d37722006-09-27 16:06:21 -0700774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
775 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
776 return 0;
777
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
779 tw32_f(MAC_MI_MODE,
780 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
781 udelay(80);
782 }
783
784 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
785 MI_COM_PHY_ADDR_MASK);
786 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
787 MI_COM_REG_ADDR_MASK);
788 frame_val |= (val & MI_COM_DATA_MASK);
789 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400790
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 tw32_f(MAC_MI_COM, frame_val);
792
793 loops = PHY_BUSY_LOOPS;
794 while (loops != 0) {
795 udelay(10);
796 frame_val = tr32(MAC_MI_COM);
797 if ((frame_val & MI_COM_BUSY) == 0) {
798 udelay(5);
799 frame_val = tr32(MAC_MI_COM);
800 break;
801 }
802 loops -= 1;
803 }
804
805 ret = -EBUSY;
806 if (loops != 0)
807 ret = 0;
808
809 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
810 tw32_f(MAC_MI_MODE, tp->mi_mode);
811 udelay(80);
812 }
813
814 return ret;
815}
816
Matt Carlson95e28692008-05-25 23:44:14 -0700817static int tg3_bmcr_reset(struct tg3 *tp)
818{
819 u32 phy_control;
820 int limit, err;
821
822 /* OK, reset it, and poll the BMCR_RESET bit until it
823 * clears or we time out.
824 */
825 phy_control = BMCR_RESET;
826 err = tg3_writephy(tp, MII_BMCR, phy_control);
827 if (err != 0)
828 return -EBUSY;
829
830 limit = 5000;
831 while (limit--) {
832 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833 if (err != 0)
834 return -EBUSY;
835
836 if ((phy_control & BMCR_RESET) == 0) {
837 udelay(40);
838 break;
839 }
840 udelay(10);
841 }
842 if (limit <= 0)
843 return -EBUSY;
844
845 return 0;
846}
847
Matt Carlson158d7ab2008-05-29 01:37:54 -0700848static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849{
850 struct tg3 *tp = (struct tg3 *)bp->priv;
851 u32 val;
852
853 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854 return -EAGAIN;
855
856 if (tg3_readphy(tp, reg, &val))
857 return -EIO;
858
859 return val;
860}
861
862static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863{
864 struct tg3 *tp = (struct tg3 *)bp->priv;
865
866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867 return -EAGAIN;
868
869 if (tg3_writephy(tp, reg, val))
870 return -EIO;
871
872 return 0;
873}
874
875static int tg3_mdio_reset(struct mii_bus *bp)
876{
877 return 0;
878}
879
Matt Carlson9c61d6b2008-11-03 16:54:56 -0800880static void tg3_mdio_config_5785(struct tg3 *tp)
Matt Carlsona9daf362008-05-25 23:49:44 -0700881{
882 u32 val;
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800883 struct phy_device *phydev;
Matt Carlsona9daf362008-05-25 23:49:44 -0700884
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800885 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
886 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
887 case TG3_PHY_ID_BCM50610:
888 val = MAC_PHYCFG2_50610_LED_MODES;
889 break;
890 case TG3_PHY_ID_BCMAC131:
891 val = MAC_PHYCFG2_AC131_LED_MODES;
892 break;
893 case TG3_PHY_ID_RTL8211C:
894 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
895 break;
896 case TG3_PHY_ID_RTL8201E:
897 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
898 break;
899 default:
Matt Carlsona9daf362008-05-25 23:49:44 -0700900 return;
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800901 }
902
903 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
904 tw32(MAC_PHYCFG2, val);
905
906 val = tr32(MAC_PHYCFG1);
907 val &= ~MAC_PHYCFG1_RGMII_INT;
908 tw32(MAC_PHYCFG1, val);
909
910 return;
911 }
912
913 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
914 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
915 MAC_PHYCFG2_FMODE_MASK_MASK |
916 MAC_PHYCFG2_GMODE_MASK_MASK |
917 MAC_PHYCFG2_ACT_MASK_MASK |
918 MAC_PHYCFG2_QUAL_MASK_MASK |
919 MAC_PHYCFG2_INBAND_ENABLE;
920
921 tw32(MAC_PHYCFG2, val);
Matt Carlsona9daf362008-05-25 23:49:44 -0700922
923 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
924 MAC_PHYCFG1_RGMII_SND_STAT_EN);
925 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
926 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
927 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
928 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
929 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
930 }
931 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
932
Matt Carlsona9daf362008-05-25 23:49:44 -0700933 val = tr32(MAC_EXT_RGMII_MODE);
934 val &= ~(MAC_RGMII_MODE_RX_INT_B |
935 MAC_RGMII_MODE_RX_QUALITY |
936 MAC_RGMII_MODE_RX_ACTIVITY |
937 MAC_RGMII_MODE_RX_ENG_DET |
938 MAC_RGMII_MODE_TX_ENABLE |
939 MAC_RGMII_MODE_TX_LOWPWR |
940 MAC_RGMII_MODE_TX_RESET);
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800941 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
Matt Carlsona9daf362008-05-25 23:49:44 -0700942 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
943 val |= MAC_RGMII_MODE_RX_INT_B |
944 MAC_RGMII_MODE_RX_QUALITY |
945 MAC_RGMII_MODE_RX_ACTIVITY |
946 MAC_RGMII_MODE_RX_ENG_DET;
947 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
948 val |= MAC_RGMII_MODE_TX_ENABLE |
949 MAC_RGMII_MODE_TX_LOWPWR |
950 MAC_RGMII_MODE_TX_RESET;
951 }
952 tw32(MAC_EXT_RGMII_MODE, val);
953}
954
Matt Carlson158d7ab2008-05-29 01:37:54 -0700955static void tg3_mdio_start(struct tg3 *tp)
956{
957 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700958 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700959 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700960 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700961 }
962
963 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
964 tw32_f(MAC_MI_MODE, tp->mi_mode);
965 udelay(80);
Matt Carlsona9daf362008-05-25 23:49:44 -0700966
Matt Carlson9c61d6b2008-11-03 16:54:56 -0800967 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
969 tg3_mdio_config_5785(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700970}
971
972static void tg3_mdio_stop(struct tg3 *tp)
973{
974 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700975 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700976 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700977 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700978 }
979}
980
981static int tg3_mdio_init(struct tg3 *tp)
982{
983 int i;
984 u32 reg;
Matt Carlsona9daf362008-05-25 23:49:44 -0700985 struct phy_device *phydev;
Matt Carlson158d7ab2008-05-29 01:37:54 -0700986
987 tg3_mdio_start(tp);
988
989 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
990 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
991 return 0;
992
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700993 tp->mdio_bus = mdiobus_alloc();
994 if (tp->mdio_bus == NULL)
995 return -ENOMEM;
Matt Carlson158d7ab2008-05-29 01:37:54 -0700996
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700997 tp->mdio_bus->name = "tg3 mdio bus";
998 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
Matt Carlson158d7ab2008-05-29 01:37:54 -0700999 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001000 tp->mdio_bus->priv = tp;
1001 tp->mdio_bus->parent = &tp->pdev->dev;
1002 tp->mdio_bus->read = &tg3_mdio_read;
1003 tp->mdio_bus->write = &tg3_mdio_write;
1004 tp->mdio_bus->reset = &tg3_mdio_reset;
1005 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1006 tp->mdio_bus->irq = &tp->mdio_irq[0];
Matt Carlson158d7ab2008-05-29 01:37:54 -07001007
1008 for (i = 0; i < PHY_MAX_ADDR; i++)
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001009 tp->mdio_bus->irq[i] = PHY_POLL;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001010
1011 /* The bus registration will look for all the PHYs on the mdio bus.
1012 * Unfortunately, it does not ensure the PHY is powered up before
1013 * accessing the PHY ID registers. A chip reset is the
1014 * quickest way to bring the device back to an operational state..
1015 */
1016 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1017 tg3_bmcr_reset(tp);
1018
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001019 i = mdiobus_register(tp->mdio_bus);
Matt Carlsona9daf362008-05-25 23:49:44 -07001020 if (i) {
Matt Carlson158d7ab2008-05-29 01:37:54 -07001021 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1022 tp->dev->name, i);
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001023 mdiobus_free(tp->mdio_bus);
Matt Carlsona9daf362008-05-25 23:49:44 -07001024 return i;
1025 }
Matt Carlson158d7ab2008-05-29 01:37:54 -07001026
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001027 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsona9daf362008-05-25 23:49:44 -07001028
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001029 if (!phydev || !phydev->drv) {
1030 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1031 mdiobus_unregister(tp->mdio_bus);
1032 mdiobus_free(tp->mdio_bus);
1033 return -ENODEV;
1034 }
1035
1036 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
Matt Carlsona9daf362008-05-25 23:49:44 -07001037 case TG3_PHY_ID_BCM50610:
Matt Carlsona9daf362008-05-25 23:49:44 -07001038 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1039 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1040 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1041 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1042 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1043 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001044 /* fallthru */
1045 case TG3_PHY_ID_RTL8211C:
1046 phydev->interface = PHY_INTERFACE_MODE_RGMII;
Matt Carlsona9daf362008-05-25 23:49:44 -07001047 break;
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001048 case TG3_PHY_ID_RTL8201E:
Matt Carlsona9daf362008-05-25 23:49:44 -07001049 case TG3_PHY_ID_BCMAC131:
1050 phydev->interface = PHY_INTERFACE_MODE_MII;
1051 break;
1052 }
1053
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001054 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1055
1056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1057 tg3_mdio_config_5785(tp);
Matt Carlsona9daf362008-05-25 23:49:44 -07001058
1059 return 0;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001060}
1061
1062static void tg3_mdio_fini(struct tg3 *tp)
1063{
1064 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1065 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001066 mdiobus_unregister(tp->mdio_bus);
1067 mdiobus_free(tp->mdio_bus);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001068 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1069 }
1070}
1071
Matt Carlson95e28692008-05-25 23:44:14 -07001072/* tp->lock is held. */
Matt Carlson4ba526c2008-08-15 14:10:04 -07001073static inline void tg3_generate_fw_event(struct tg3 *tp)
1074{
1075 u32 val;
1076
1077 val = tr32(GRC_RX_CPU_EVENT);
1078 val |= GRC_RX_CPU_DRIVER_EVENT;
1079 tw32_f(GRC_RX_CPU_EVENT, val);
1080
1081 tp->last_event_jiffies = jiffies;
1082}
1083
1084#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1085
1086/* tp->lock is held. */
Matt Carlson95e28692008-05-25 23:44:14 -07001087static void tg3_wait_for_event_ack(struct tg3 *tp)
1088{
1089 int i;
Matt Carlson4ba526c2008-08-15 14:10:04 -07001090 unsigned int delay_cnt;
1091 long time_remain;
Matt Carlson95e28692008-05-25 23:44:14 -07001092
Matt Carlson4ba526c2008-08-15 14:10:04 -07001093 /* If enough time has passed, no wait is necessary. */
1094 time_remain = (long)(tp->last_event_jiffies + 1 +
1095 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1096 (long)jiffies;
1097 if (time_remain < 0)
1098 return;
1099
1100 /* Check if we can shorten the wait time. */
1101 delay_cnt = jiffies_to_usecs(time_remain);
1102 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1103 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1104 delay_cnt = (delay_cnt >> 3) + 1;
1105
1106 for (i = 0; i < delay_cnt; i++) {
Matt Carlson95e28692008-05-25 23:44:14 -07001107 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1108 break;
Matt Carlson4ba526c2008-08-15 14:10:04 -07001109 udelay(8);
Matt Carlson95e28692008-05-25 23:44:14 -07001110 }
1111}
1112
1113/* tp->lock is held. */
1114static void tg3_ump_link_report(struct tg3 *tp)
1115{
1116 u32 reg;
1117 u32 val;
1118
1119 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1120 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1121 return;
1122
1123 tg3_wait_for_event_ack(tp);
1124
1125 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1126
1127 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1128
1129 val = 0;
1130 if (!tg3_readphy(tp, MII_BMCR, &reg))
1131 val = reg << 16;
1132 if (!tg3_readphy(tp, MII_BMSR, &reg))
1133 val |= (reg & 0xffff);
1134 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1135
1136 val = 0;
1137 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1138 val = reg << 16;
1139 if (!tg3_readphy(tp, MII_LPA, &reg))
1140 val |= (reg & 0xffff);
1141 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1142
1143 val = 0;
1144 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1145 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1146 val = reg << 16;
1147 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1148 val |= (reg & 0xffff);
1149 }
1150 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1151
1152 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1153 val = reg << 16;
1154 else
1155 val = 0;
1156 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1157
Matt Carlson4ba526c2008-08-15 14:10:04 -07001158 tg3_generate_fw_event(tp);
Matt Carlson95e28692008-05-25 23:44:14 -07001159}
1160
1161static void tg3_link_report(struct tg3 *tp)
1162{
1163 if (!netif_carrier_ok(tp->dev)) {
1164 if (netif_msg_link(tp))
1165 printk(KERN_INFO PFX "%s: Link is down.\n",
1166 tp->dev->name);
1167 tg3_ump_link_report(tp);
1168 } else if (netif_msg_link(tp)) {
1169 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1170 tp->dev->name,
1171 (tp->link_config.active_speed == SPEED_1000 ?
1172 1000 :
1173 (tp->link_config.active_speed == SPEED_100 ?
1174 100 : 10)),
1175 (tp->link_config.active_duplex == DUPLEX_FULL ?
1176 "full" : "half"));
1177
1178 printk(KERN_INFO PFX
1179 "%s: Flow control is %s for TX and %s for RX.\n",
1180 tp->dev->name,
1181 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1182 "on" : "off",
1183 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1184 "on" : "off");
1185 tg3_ump_link_report(tp);
1186 }
1187}
1188
1189static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1190{
1191 u16 miireg;
1192
1193 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1194 miireg = ADVERTISE_PAUSE_CAP;
1195 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1196 miireg = ADVERTISE_PAUSE_ASYM;
1197 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1198 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1199 else
1200 miireg = 0;
1201
1202 return miireg;
1203}
1204
1205static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1206{
1207 u16 miireg;
1208
1209 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1210 miireg = ADVERTISE_1000XPAUSE;
1211 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1212 miireg = ADVERTISE_1000XPSE_ASYM;
1213 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1214 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1215 else
1216 miireg = 0;
1217
1218 return miireg;
1219}
1220
1221static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1222{
1223 u8 cap = 0;
1224
1225 if (lcladv & ADVERTISE_PAUSE_CAP) {
1226 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1227 if (rmtadv & LPA_PAUSE_CAP)
1228 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1229 else if (rmtadv & LPA_PAUSE_ASYM)
1230 cap = TG3_FLOW_CTRL_RX;
1231 } else {
1232 if (rmtadv & LPA_PAUSE_CAP)
1233 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1234 }
1235 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1236 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1237 cap = TG3_FLOW_CTRL_TX;
1238 }
1239
1240 return cap;
1241}
1242
1243static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1244{
1245 u8 cap = 0;
1246
1247 if (lcladv & ADVERTISE_1000XPAUSE) {
1248 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1249 if (rmtadv & LPA_1000XPAUSE)
1250 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1251 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1252 cap = TG3_FLOW_CTRL_RX;
1253 } else {
1254 if (rmtadv & LPA_1000XPAUSE)
1255 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1256 }
1257 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1258 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1259 cap = TG3_FLOW_CTRL_TX;
1260 }
1261
1262 return cap;
1263}
1264
Matt Carlsonf51f3562008-05-25 23:45:08 -07001265static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
Matt Carlson95e28692008-05-25 23:44:14 -07001266{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001267 u8 autoneg;
Matt Carlsonf51f3562008-05-25 23:45:08 -07001268 u8 flowctrl = 0;
Matt Carlson95e28692008-05-25 23:44:14 -07001269 u32 old_rx_mode = tp->rx_mode;
1270 u32 old_tx_mode = tp->tx_mode;
1271
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001272 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001273 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001274 else
1275 autoneg = tp->link_config.autoneg;
1276
1277 if (autoneg == AUTONEG_ENABLE &&
Matt Carlson95e28692008-05-25 23:44:14 -07001278 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1279 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Matt Carlsonf51f3562008-05-25 23:45:08 -07001280 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
Matt Carlson95e28692008-05-25 23:44:14 -07001281 else
Matt Carlsonf51f3562008-05-25 23:45:08 -07001282 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1283 } else
1284 flowctrl = tp->link_config.flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001285
Matt Carlsonf51f3562008-05-25 23:45:08 -07001286 tp->link_config.active_flowctrl = flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001287
Matt Carlsonf51f3562008-05-25 23:45:08 -07001288 if (flowctrl & TG3_FLOW_CTRL_RX)
Matt Carlson95e28692008-05-25 23:44:14 -07001289 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1290 else
1291 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1292
Matt Carlsonf51f3562008-05-25 23:45:08 -07001293 if (old_rx_mode != tp->rx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001294 tw32_f(MAC_RX_MODE, tp->rx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001295
Matt Carlsonf51f3562008-05-25 23:45:08 -07001296 if (flowctrl & TG3_FLOW_CTRL_TX)
Matt Carlson95e28692008-05-25 23:44:14 -07001297 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1298 else
1299 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1300
Matt Carlsonf51f3562008-05-25 23:45:08 -07001301 if (old_tx_mode != tp->tx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001302 tw32_f(MAC_TX_MODE, tp->tx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001303}
1304
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001305static void tg3_adjust_link(struct net_device *dev)
1306{
1307 u8 oldflowctrl, linkmesg = 0;
1308 u32 mac_mode, lcl_adv, rmt_adv;
1309 struct tg3 *tp = netdev_priv(dev);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001310 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001311
1312 spin_lock(&tp->lock);
1313
1314 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1315 MAC_MODE_HALF_DUPLEX);
1316
1317 oldflowctrl = tp->link_config.active_flowctrl;
1318
1319 if (phydev->link) {
1320 lcl_adv = 0;
1321 rmt_adv = 0;
1322
1323 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1324 mac_mode |= MAC_MODE_PORT_MODE_MII;
1325 else
1326 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1327
1328 if (phydev->duplex == DUPLEX_HALF)
1329 mac_mode |= MAC_MODE_HALF_DUPLEX;
1330 else {
1331 lcl_adv = tg3_advert_flowctrl_1000T(
1332 tp->link_config.flowctrl);
1333
1334 if (phydev->pause)
1335 rmt_adv = LPA_PAUSE_CAP;
1336 if (phydev->asym_pause)
1337 rmt_adv |= LPA_PAUSE_ASYM;
1338 }
1339
1340 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1341 } else
1342 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1343
1344 if (mac_mode != tp->mac_mode) {
1345 tp->mac_mode = mac_mode;
1346 tw32_f(MAC_MODE, tp->mac_mode);
1347 udelay(40);
1348 }
1349
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1351 if (phydev->speed == SPEED_10)
1352 tw32(MAC_MI_STAT,
1353 MAC_MI_STAT_10MBPS_MODE |
1354 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1355 else
1356 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1357 }
1358
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001359 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1360 tw32(MAC_TX_LENGTHS,
1361 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1362 (6 << TX_LENGTHS_IPG_SHIFT) |
1363 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1364 else
1365 tw32(MAC_TX_LENGTHS,
1366 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1367 (6 << TX_LENGTHS_IPG_SHIFT) |
1368 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1369
1370 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1371 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1372 phydev->speed != tp->link_config.active_speed ||
1373 phydev->duplex != tp->link_config.active_duplex ||
1374 oldflowctrl != tp->link_config.active_flowctrl)
1375 linkmesg = 1;
1376
1377 tp->link_config.active_speed = phydev->speed;
1378 tp->link_config.active_duplex = phydev->duplex;
1379
1380 spin_unlock(&tp->lock);
1381
1382 if (linkmesg)
1383 tg3_link_report(tp);
1384}
1385
1386static int tg3_phy_init(struct tg3 *tp)
1387{
1388 struct phy_device *phydev;
1389
1390 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1391 return 0;
1392
1393 /* Bring the PHY back to a known state. */
1394 tg3_bmcr_reset(tp);
1395
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001396 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001397
1398 /* Attach the MAC to the PHY. */
Matt Carlsona9daf362008-05-25 23:49:44 -07001399 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1400 phydev->dev_flags, phydev->interface);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001401 if (IS_ERR(phydev)) {
1402 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1403 return PTR_ERR(phydev);
1404 }
1405
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001406 /* Mask with MAC supported features. */
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001407 switch (phydev->interface) {
1408 case PHY_INTERFACE_MODE_GMII:
1409 case PHY_INTERFACE_MODE_RGMII:
1410 phydev->supported &= (PHY_GBIT_FEATURES |
1411 SUPPORTED_Pause |
1412 SUPPORTED_Asym_Pause);
1413 break;
1414 case PHY_INTERFACE_MODE_MII:
1415 phydev->supported &= (PHY_BASIC_FEATURES |
1416 SUPPORTED_Pause |
1417 SUPPORTED_Asym_Pause);
1418 break;
1419 default:
1420 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1421 return -EINVAL;
1422 }
1423
1424 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001425
1426 phydev->advertising = phydev->supported;
1427
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001428 return 0;
1429}
1430
1431static void tg3_phy_start(struct tg3 *tp)
1432{
1433 struct phy_device *phydev;
1434
1435 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1436 return;
1437
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001438 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001439
1440 if (tp->link_config.phy_is_low_power) {
1441 tp->link_config.phy_is_low_power = 0;
1442 phydev->speed = tp->link_config.orig_speed;
1443 phydev->duplex = tp->link_config.orig_duplex;
1444 phydev->autoneg = tp->link_config.orig_autoneg;
1445 phydev->advertising = tp->link_config.orig_advertising;
1446 }
1447
1448 phy_start(phydev);
1449
1450 phy_start_aneg(phydev);
1451}
1452
1453static void tg3_phy_stop(struct tg3 *tp)
1454{
1455 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1456 return;
1457
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001458 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001459}
1460
1461static void tg3_phy_fini(struct tg3 *tp)
1462{
1463 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001464 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001465 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1466 }
1467}
1468
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001469static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1470{
1471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1472 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1473}
1474
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001475static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1476{
1477 u32 phy;
1478
1479 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1480 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1481 return;
1482
1483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1484 u32 ephy;
1485
1486 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1487 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1488 ephy | MII_TG3_EPHY_SHADOW_EN);
1489 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1490 if (enable)
1491 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1492 else
1493 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1494 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1495 }
1496 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1497 }
1498 } else {
1499 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1500 MII_TG3_AUXCTL_SHDWSEL_MISC;
1501 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1502 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1503 if (enable)
1504 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1505 else
1506 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1507 phy |= MII_TG3_AUXCTL_MISC_WREN;
1508 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1509 }
1510 }
1511}
1512
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513static void tg3_phy_set_wirespeed(struct tg3 *tp)
1514{
1515 u32 val;
1516
1517 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1518 return;
1519
1520 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1521 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1522 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1523 (val | (1 << 15) | (1 << 4)));
1524}
1525
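/* Load PHY DSP coefficients from the chip's one-time-programmable (OTP)
 * value.  Each field is extracted as ((otp & MASK) >> SHIFT) and written
 * through tg3_phydsp_write() while the SM_DSP clock is enabled.
 */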
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001526static void tg3_phy_apply_otp(struct tg3 *tp)
1527{
1528 u32 otp, phy;
1529
1530 if (!tp->phy_otp)
1531 return;
1532
1533 otp = tp->phy_otp;
1534
1535 /* Enable SM_DSP clock and tx 6dB coding. */
1536 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1537 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1538 MII_TG3_AUXCTL_ACTL_TX_6DB;
1539 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1540
1541 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1542 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1543 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1544
1545 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1546 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1547 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1548
1549 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1550 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1551 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1552
1553 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1554 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1555
1556 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1557 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1558
1559 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1560 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1561 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1562
1563 /* Turn off SM_DSP clock. */
1564 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1565 MII_TG3_AUXCTL_ACTL_TX_6DB;
1566 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1567}
1568
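/* Poll PHY register 0x16 until the macro-busy bit (0x1000) clears.
 * There is no delay between attempts; the MDIO accesses themselves pace
 * the loop.  Returns -EBUSY if the bit never clears.
 */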
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569static int tg3_wait_macro_done(struct tg3 *tp)
1570{
1571 int limit = 100;
1572
1573 while (limit--) {
1574 u32 tmp32;
1575
1576 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1577 if ((tmp32 & 0x1000) == 0)
1578 break;
1579 }
1580 }
1581 if (limit <= 0)
1582 return -EBUSY;
1583
1584 return 0;
1585}
1586
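/* Write a known test pattern into each of the four DSP channels and read
 * it back.  A macro timeout asks the caller (via *resetp) to reset the
 * PHY and retry; a data mismatch fails immediately with -EBUSY.
 */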
1587static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1588{
1589 static const u32 test_pat[4][6] = {
1590 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1591 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1592 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1593 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1594 };
1595 int chan;
1596
1597 for (chan = 0; chan < 4; chan++) {
1598 int i;
1599
1600 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1601 (chan * 0x2000) | 0x0200);
1602 tg3_writephy(tp, 0x16, 0x0002);
1603
1604 for (i = 0; i < 6; i++)
1605 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1606 test_pat[chan][i]);
1607
1608 tg3_writephy(tp, 0x16, 0x0202);
1609 if (tg3_wait_macro_done(tp)) {
1610 *resetp = 1;
1611 return -EBUSY;
1612 }
1613
1614 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1615 (chan * 0x2000) | 0x0200);
1616 tg3_writephy(tp, 0x16, 0x0082);
1617 if (tg3_wait_macro_done(tp)) {
1618 *resetp = 1;
1619 return -EBUSY;
1620 }
1621
1622 tg3_writephy(tp, 0x16, 0x0802);
1623 if (tg3_wait_macro_done(tp)) {
1624 *resetp = 1;
1625 return -EBUSY;
1626 }
1627
1628 for (i = 0; i < 6; i += 2) {
1629 u32 low, high;
1630
1631 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1632 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1633 tg3_wait_macro_done(tp)) {
1634 *resetp = 1;
1635 return -EBUSY;
1636 }
1637 low &= 0x7fff;
1638 high &= 0x000f;
1639 if (low != test_pat[chan][i] ||
1640 high != test_pat[chan][i+1]) {
1641 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1642 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1643 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1644
1645 return -EBUSY;
1646 }
1647 }
1648 }
1649
1650 return 0;
1651}
1652
1653static int tg3_phy_reset_chanpat(struct tg3 *tp)
1654{
1655 int chan;
1656
1657 for (chan = 0; chan < 4; chan++) {
1658 int i;
1659
1660 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1661 (chan * 0x2000) | 0x0200);
1662 tg3_writephy(tp, 0x16, 0x0002);
1663 for (i = 0; i < 6; i++)
1664 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1665 tg3_writephy(tp, 0x16, 0x0202);
1666 if (tg3_wait_macro_done(tp))
1667 return -EBUSY;
1668 }
1669
1670 return 0;
1671}
1672
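/* DSP test-pattern workaround applied after resetting 5703/5704/5705
 * PHYs: force 1000 Mbps full-duplex master mode, rewrite and verify the
 * test pattern (retrying with a fresh BMCR reset up to 10 times), then
 * restore the original control registers.
 */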
1673static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1674{
1675 u32 reg32, phy9_orig;
1676 int retries, do_phy_reset, err;
1677
1678 retries = 10;
1679 do_phy_reset = 1;
1680 do {
1681 if (do_phy_reset) {
1682 err = tg3_bmcr_reset(tp);
1683 if (err)
1684 return err;
1685 do_phy_reset = 0;
1686 }
1687
1688 /* Disable transmitter and interrupt. */
1689 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1690 continue;
1691
1692 reg32 |= 0x3000;
1693 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1694
		/* Set full duplex and 1000 Mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1698
1699 /* Set to master mode. */
1700 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1701 continue;
1702
1703 tg3_writephy(tp, MII_TG3_CTRL,
1704 (MII_TG3_CTRL_AS_MASTER |
1705 MII_TG3_CTRL_ENABLE_AS_MASTER));
1706
1707 /* Enable SM_DSP_CLOCK and 6dB. */
1708 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1709
1710 /* Block the PHY control access. */
1711 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1712 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1713
1714 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1715 if (!err)
1716 break;
1717 } while (--retries);
1718
1719 err = tg3_phy_reset_chanpat(tp);
1720 if (err)
1721 return err;
1722
1723 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1724 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1725
1726 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1727 tg3_writephy(tp, 0x16, 0x0000);
1728
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
1737
1738 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1739
1740 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1741 reg32 &= ~0x3000;
1742 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1743 } else if (!err)
1744 err = -EBUSY;
1745
1746 return err;
1747}
1748
1749/* This will reset the tigon3 PHY if there is no valid
1750 * link unless the FORCE argument is non-zero.
1751 */
1752static int tg3_phy_reset(struct tg3 *tp)
1753{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001754 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 u32 phy_status;
1756 int err;
1757
Michael Chan60189dd2006-12-17 17:08:07 -08001758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1759 u32 val;
1760
1761 val = tr32(GRC_MISC_CFG);
1762 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1763 udelay(40);
1764 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1766 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1767 if (err != 0)
1768 return -EBUSY;
1769
Michael Chanc8e1e822006-04-29 18:55:17 -07001770 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1771 netif_carrier_off(tp->dev);
1772 tg3_link_report(tp);
1773 }
1774
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1778 err = tg3_phy_reset_5703_4_5(tp);
1779 if (err)
1780 return err;
1781 goto out;
1782 }
1783
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001784 cpmuctrl = 0;
1785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1786 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1787 cpmuctrl = tr32(TG3_CPMU_CTRL);
1788 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1789 tw32(TG3_CPMU_CTRL,
1790 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1791 }
1792
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 err = tg3_bmcr_reset(tp);
1794 if (err)
1795 return err;
1796
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001797 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1798 u32 phy;
1799
1800 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1801 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1802
1803 tw32(TG3_CPMU_CTRL, cpmuctrl);
1804 }
1805
Matt Carlsonbcb37f62008-11-03 16:52:09 -08001806 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1807 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001808 u32 val;
1809
1810 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1811 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1812 CPMU_LSPD_1000MB_MACCLK_12_5) {
1813 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1814 udelay(40);
1815 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1816 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001817
1818 /* Disable GPHY autopowerdown. */
1819 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1820 MII_TG3_MISC_SHDW_WREN |
1821 MII_TG3_MISC_SHDW_APD_SEL |
1822 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001823 }
1824
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001825 tg3_phy_apply_otp(tp);
1826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827out:
1828 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1829 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1830 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1831 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1832 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1835 }
1836 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1837 tg3_writephy(tp, 0x1c, 0x8d68);
1838 tg3_writephy(tp, 0x1c, 0x8d68);
1839 }
1840 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1841 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1842 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1843 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1845 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1846 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1847 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1848 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1851 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1852 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001853 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1854 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1855 tg3_writephy(tp, MII_TG3_TEST1,
1856 MII_TG3_TEST1_TRIM_EN | 0x4);
1857 } else
1858 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001859 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1860 }
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
1863 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1864 /* Cannot do read-modify-write on 5401 */
1865 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001866 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 u32 phy_reg;
1868
1869 /* Set bit 14 with read-modify-write to preserve other bits */
1870 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1871 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1872 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1873 }
1874
1875 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1876 * jumbo frames transmission.
1877 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001878 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 u32 phy_reg;
1880
1881 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1882 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1883 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1884 }
1885
Michael Chan715116a2006-09-27 16:09:25 -07001886 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001887 /* adjust output voltage */
1888 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001889 }
1890
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001891 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 tg3_phy_set_wirespeed(tp);
1893 return 0;
1894}
1895
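/* Switch the GPIO-controlled auxiliary power (Vaux) on or off depending
 * on whether this port or its peer (the other port of a 5704/5714) still
 * needs it for WOL or ASF.
 */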
1896static void tg3_frob_aux_power(struct tg3 *tp)
1897{
1898 struct tg3 *tp_peer = tp;
1899
Michael Chan9d26e212006-12-07 00:21:14 -08001900 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 return;
1902
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001903 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1904 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1905 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001907 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001908 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001909 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001910 tp_peer = tp;
1911 else
1912 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001913 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001916 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1917 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1918 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001921 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1922 (GRC_LCLCTRL_GPIO_OE0 |
1923 GRC_LCLCTRL_GPIO_OE1 |
1924 GRC_LCLCTRL_GPIO_OE2 |
1925 GRC_LCLCTRL_GPIO_OUTPUT0 |
1926 GRC_LCLCTRL_GPIO_OUTPUT1),
1927 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001928 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1929 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1930 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1931 GRC_LCLCTRL_GPIO_OE1 |
1932 GRC_LCLCTRL_GPIO_OE2 |
1933 GRC_LCLCTRL_GPIO_OUTPUT0 |
1934 GRC_LCLCTRL_GPIO_OUTPUT1 |
1935 tp->grc_local_ctrl;
1936 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1937
1938 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1939 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1940
1941 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1942 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 } else {
1944 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001945 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
1947 if (tp_peer != tp &&
1948 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1949 return;
1950
Michael Chandc56b7d2005-12-19 16:26:28 -08001951 /* Workaround to prevent overdrawing Amps. */
1952 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1953 ASIC_REV_5714) {
1954 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001955 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1956 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001957 }
1958
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 /* On 5753 and variants, GPIO2 cannot be used. */
1960 no_gpio2 = tp->nic_sram_data_cfg &
1961 NIC_SRAM_DATA_CFG_NO_GPIO2;
1962
Michael Chandc56b7d2005-12-19 16:26:28 -08001963 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 GRC_LCLCTRL_GPIO_OE1 |
1965 GRC_LCLCTRL_GPIO_OE2 |
1966 GRC_LCLCTRL_GPIO_OUTPUT1 |
1967 GRC_LCLCTRL_GPIO_OUTPUT2;
1968 if (no_gpio2) {
1969 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1970 GRC_LCLCTRL_GPIO_OUTPUT2);
1971 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001972 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1973 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
1975 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1976
Michael Chanb401e9e2005-12-19 16:27:04 -08001977 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1978 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
1980 if (!no_gpio2) {
1981 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001982 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1983 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 }
1985 }
1986 } else {
1987 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1988 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1989 if (tp_peer != tp &&
1990 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1991 return;
1992
Michael Chanb401e9e2005-12-19 16:27:04 -08001993 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1994 (GRC_LCLCTRL_GPIO_OE1 |
1995 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Michael Chanb401e9e2005-12-19 16:27:04 -08001997 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1998 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
Michael Chanb401e9e2005-12-19 16:27:04 -08002000 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2001 (GRC_LCLCTRL_GPIO_OE1 |
2002 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 }
2004 }
2005}
2006
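/* Decide whether MAC_MODE_LINK_POLARITY should be set on 5700-class
 * devices for the given link speed; the answer depends on the LED mode
 * and on whether the PHY is a 5411.
 */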
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002007static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2008{
2009 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2010 return 1;
2011 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2012 if (speed != SPEED_10)
2013 return 1;
2014 } else if (speed == SPEED_10)
2015 return 1;
2016
2017 return 0;
2018}
2019
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020static int tg3_setup_phy(struct tg3 *, int);
2021
2022#define RESET_KIND_SHUTDOWN 0
2023#define RESET_KIND_INIT 1
2024#define RESET_KIND_SUSPEND 2
2025
2026static void tg3_write_sig_post_reset(struct tg3 *, int);
2027static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08002028static int tg3_nvram_lock(struct tg3 *);
2029static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
Matt Carlson0a459aa2008-11-03 16:54:15 -08002031static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
Michael Chan15c3b692006-03-22 01:06:52 -08002032{
Matt Carlsonce057f02007-11-12 21:08:03 -08002033 u32 val;
2034
Michael Chan51297242007-02-13 12:17:57 -08002035 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2037 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2038 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2039
2040 sg_dig_ctrl |=
2041 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2042 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2043 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2044 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002045 return;
Michael Chan51297242007-02-13 12:17:57 -08002046 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002047
Michael Chan60189dd2006-12-17 17:08:07 -08002048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08002049 tg3_bmcr_reset(tp);
2050 val = tr32(GRC_MISC_CFG);
2051 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2052 udelay(40);
2053 return;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002054 } else if (do_low_power) {
Michael Chan715116a2006-09-27 16:09:25 -07002055 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2056 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002057
2058 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2059 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2060 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2061 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2062 MII_TG3_AUXCTL_PCTL_VREG_11V);
Michael Chan715116a2006-09-27 16:09:25 -07002063 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002064
Michael Chan15c3b692006-03-22 01:06:52 -08002065 /* The PHY should not be powered down on some chips because
2066 * of bugs.
2067 */
2068 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2069 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2070 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2071 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2072 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002073
Matt Carlsonbcb37f62008-11-03 16:52:09 -08002074 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2075 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002076 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2077 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2078 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2079 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2080 }
2081
Michael Chan15c3b692006-03-22 01:06:52 -08002082 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2083}
2084
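/* Program the station address: MAC_ADDR_n_HIGH takes bytes 0-1
 * (dev_addr[0] << 8 | dev_addr[1]), MAC_ADDR_n_LOW bytes 2-5, replicated
 * into all four (or, on 5703/5704, sixteen) address slots.  skip_mac_1
 * leaves slot 1 alone, presumably because firmware may own it.  The byte
 * sum also seeds the transmit backoff generator.
 */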
Matt Carlson3f007892008-11-03 16:51:36 -08002085/* tp->lock is held. */
2086static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2087{
2088 u32 addr_high, addr_low;
2089 int i;
2090
2091 addr_high = ((tp->dev->dev_addr[0] << 8) |
2092 tp->dev->dev_addr[1]);
2093 addr_low = ((tp->dev->dev_addr[2] << 24) |
2094 (tp->dev->dev_addr[3] << 16) |
2095 (tp->dev->dev_addr[4] << 8) |
2096 (tp->dev->dev_addr[5] << 0));
2097 for (i = 0; i < 4; i++) {
2098 if (i == 1 && skip_mac_1)
2099 continue;
2100 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2101 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2102 }
2103
2104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2106 for (i = 0; i < 12; i++) {
2107 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2108 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2109 }
2110 }
2111
2112 addr_high = (tp->dev->dev_addr[0] +
2113 tp->dev->dev_addr[1] +
2114 tp->dev->dev_addr[2] +
2115 tp->dev->dev_addr[3] +
2116 tp->dev->dev_addr[4] +
2117 tp->dev->dev_addr[5]) &
2118 TX_BACKOFF_SEED_MASK;
2119 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2120}
2121
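/* Move the device into the requested PCI power state.  D0 switches the
 * board back to Vmain; the low-power states save the current link
 * configuration, reduce the PHY and core clocks as far as WOL, ASF and
 * APE firmware allow, and arm magic-packet wake-up when WOL is enabled.
 */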
Michael Chanbc1c7562006-03-20 17:48:03 -08002122static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123{
2124 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002125 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
2127 /* Make sure register accesses (indirect or otherwise)
2128 * will function correctly.
2129 */
2130 pci_write_config_dword(tp->pdev,
2131 TG3PCI_MISC_HOST_CTRL,
2132 tp->misc_host_ctrl);
2133
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002135 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002136 pci_enable_wake(tp->pdev, state, false);
2137 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002138
Michael Chan9d26e212006-12-07 00:21:14 -08002139 /* Switch out of Vaux if it is a NIC */
2140 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002141 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
2143 return 0;
2144
Michael Chanbc1c7562006-03-20 17:48:03 -08002145 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002146 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002147 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 break;
2149
2150 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002151 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2152 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2156 tw32(TG3PCI_MISC_HOST_CTRL,
2157 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2158
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002159 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2160 device_may_wakeup(&tp->pdev->dev) &&
2161 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2162
Matt Carlsondd477002008-05-25 23:45:58 -07002163 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002164 do_low_power = false;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002165 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2166 !tp->link_config.phy_is_low_power) {
2167 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002168 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002169
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002170 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002171
2172 tp->link_config.phy_is_low_power = 1;
2173
2174 tp->link_config.orig_speed = phydev->speed;
2175 tp->link_config.orig_duplex = phydev->duplex;
2176 tp->link_config.orig_autoneg = phydev->autoneg;
2177 tp->link_config.orig_advertising = phydev->advertising;
2178
2179 advertising = ADVERTISED_TP |
2180 ADVERTISED_Pause |
2181 ADVERTISED_Autoneg |
2182 ADVERTISED_10baseT_Half;
2183
2184 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002185 device_should_wake) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002186 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2187 advertising |=
2188 ADVERTISED_100baseT_Half |
2189 ADVERTISED_100baseT_Full |
2190 ADVERTISED_10baseT_Full;
2191 else
2192 advertising |= ADVERTISED_10baseT_Full;
2193 }
2194
2195 phydev->advertising = advertising;
2196
2197 phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != TG3_PHY_ID_BCMAC131) {
				phyid &= TG3_PHY_OUI_MASK;
				/* A single OUI can only match one of these
				 * values, so the test must be an OR, not an
				 * AND, or do_low_power would never be set.
				 */
				if (phyid == TG3_PHY_OUI_1 ||
				    phyid == TG3_PHY_OUI_2 ||
				    phyid == TG3_PHY_OUI_3)
					do_low_power = true;
			}
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002207 }
Matt Carlsondd477002008-05-25 23:45:58 -07002208 } else {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002209 do_low_power = false;
2210
Matt Carlsondd477002008-05-25 23:45:58 -07002211 if (tp->link_config.phy_is_low_power == 0) {
2212 tp->link_config.phy_is_low_power = 1;
2213 tp->link_config.orig_speed = tp->link_config.speed;
2214 tp->link_config.orig_duplex = tp->link_config.duplex;
2215 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2216 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217
Matt Carlsondd477002008-05-25 23:45:58 -07002218 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2219 tp->link_config.speed = SPEED_10;
2220 tp->link_config.duplex = DUPLEX_HALF;
2221 tp->link_config.autoneg = AUTONEG_ENABLE;
2222 tg3_setup_phy(tp, 0);
2223 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 }
2225
Matt Carlson3f007892008-11-03 16:51:36 -08002226 __tg3_set_mac_addr(tp, 0);
2227
Michael Chanb5d37722006-09-27 16:06:21 -07002228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2229 u32 val;
2230
2231 val = tr32(GRC_VCPU_EXT_CTRL);
2232 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2233 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002234 int i;
2235 u32 val;
2236
2237 for (i = 0; i < 200; i++) {
2238 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2239 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2240 break;
2241 msleep(1);
2242 }
2243 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002244 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2245 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2246 WOL_DRV_STATE_SHUTDOWN |
2247 WOL_DRV_WOL |
2248 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002249
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002250 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 u32 mac_mode;
2252
2253 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002254 if (do_low_power) {
Matt Carlsondd477002008-05-25 23:45:58 -07002255 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2256 udelay(40);
2257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Michael Chan3f7045c2006-09-27 16:02:29 -07002259 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2260 mac_mode = MAC_MODE_PORT_MODE_GMII;
2261 else
2262 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002264 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2265 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2266 ASIC_REV_5700) {
2267 u32 speed = (tp->tg3_flags &
2268 TG3_FLAG_WOL_SPEED_100MB) ?
2269 SPEED_100 : SPEED_10;
2270 if (tg3_5700_link_polarity(tp, speed))
2271 mac_mode |= MAC_MODE_LINK_POLARITY;
2272 else
2273 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 } else {
2276 mac_mode = MAC_MODE_PORT_MODE_TBI;
2277 }
2278
John W. Linvillecbf46852005-04-21 17:01:29 -07002279 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 tw32(MAC_LED_CTRL, tp->led_ctrl);
2281
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002282 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2283 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2284 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2285 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2286 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2287 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Matt Carlson3bda1252008-08-15 14:08:22 -07002289 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2290 mac_mode |= tp->mac_mode &
2291 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2292 if (mac_mode & MAC_MODE_APE_TX_EN)
2293 mac_mode |= MAC_MODE_TDE_ENABLE;
2294 }
2295
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 tw32_f(MAC_MODE, mac_mode);
2297 udelay(100);
2298
2299 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2300 udelay(10);
2301 }
2302
2303 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2306 u32 base_val;
2307
2308 base_val = tp->pci_clock_ctrl;
2309 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2310 CLOCK_CTRL_TXCLK_DISABLE);
2311
Michael Chanb401e9e2005-12-19 16:27:04 -08002312 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2313 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002314 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002315 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002316 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002317 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002318 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2320 u32 newbits1, newbits2;
2321
2322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2324 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2325 CLOCK_CTRL_TXCLK_DISABLE |
2326 CLOCK_CTRL_ALTCLK);
2327 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2328 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2329 newbits1 = CLOCK_CTRL_625_CORE;
2330 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2331 } else {
2332 newbits1 = CLOCK_CTRL_ALTCLK;
2333 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2334 }
2335
Michael Chanb401e9e2005-12-19 16:27:04 -08002336 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2337 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
Michael Chanb401e9e2005-12-19 16:27:04 -08002339 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2340 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341
2342 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2343 u32 newbits3;
2344
2345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2347 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2348 CLOCK_CTRL_TXCLK_DISABLE |
2349 CLOCK_CTRL_44MHZ_CORE);
2350 } else {
2351 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2352 }
2353
Michael Chanb401e9e2005-12-19 16:27:04 -08002354 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2355 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 }
2357 }
2358
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002359 if (!(device_should_wake) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002360 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2361 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson0a459aa2008-11-03 16:54:15 -08002362 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08002363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 tg3_frob_aux_power(tp);
2365
2366 /* Workaround for unstable PLL clock */
2367 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2368 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2369 u32 val = tr32(0x7d00);
2370
2371 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2372 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002373 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002374 int err;
2375
2376 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002378 if (!err)
2379 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002380 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 }
2382
Michael Chanbbadf502006-04-06 21:46:34 -07002383 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2384
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002385 if (device_should_wake)
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002386 pci_enable_wake(tp->pdev, state, true);
2387
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002389 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 return 0;
2392}
2393
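/* Decode the speed/duplex field of MII_TG3_AUX_STAT.  The 10/100-only
 * 5906 reports speed and duplex in separate bits, handled in the default
 * case.
 */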
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2395{
2396 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2397 case MII_TG3_AUX_STAT_10HALF:
2398 *speed = SPEED_10;
2399 *duplex = DUPLEX_HALF;
2400 break;
2401
2402 case MII_TG3_AUX_STAT_10FULL:
2403 *speed = SPEED_10;
2404 *duplex = DUPLEX_FULL;
2405 break;
2406
2407 case MII_TG3_AUX_STAT_100HALF:
2408 *speed = SPEED_100;
2409 *duplex = DUPLEX_HALF;
2410 break;
2411
2412 case MII_TG3_AUX_STAT_100FULL:
2413 *speed = SPEED_100;
2414 *duplex = DUPLEX_FULL;
2415 break;
2416
2417 case MII_TG3_AUX_STAT_1000HALF:
2418 *speed = SPEED_1000;
2419 *duplex = DUPLEX_HALF;
2420 break;
2421
2422 case MII_TG3_AUX_STAT_1000FULL:
2423 *speed = SPEED_1000;
2424 *duplex = DUPLEX_FULL;
2425 break;
2426
2427 default:
Michael Chan715116a2006-09-27 16:09:25 -07002428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2429 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2430 SPEED_10;
2431 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2432 DUPLEX_HALF;
2433 break;
2434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 *speed = SPEED_INVALID;
2436 *duplex = DUPLEX_INVALID;
2437 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439}
2440
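/* Program the copper PHY advertisement (or forced-mode) registers.  In
 * low-power mode only 10 Mbps (plus 100 Mbps if WOL requires it) is
 * advertised; with autoneg the link_config.advertising mask is
 * translated into the MII advertisement words; otherwise BMCR is forced
 * to the requested speed/duplex after waiting for the old link to drop.
 */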
2441static void tg3_phy_copper_begin(struct tg3 *tp)
2442{
2443 u32 new_adv;
2444 int i;
2445
2446 if (tp->link_config.phy_is_low_power) {
2447 /* Entering low power mode. Disable gigabit and
2448 * 100baseT advertisements.
2449 */
2450 tg3_writephy(tp, MII_TG3_CTRL, 0);
2451
2452 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2453 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2454 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2455 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2456
2457 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2458 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2460 tp->link_config.advertising &=
2461 ~(ADVERTISED_1000baseT_Half |
2462 ADVERTISED_1000baseT_Full);
2463
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002464 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2466 new_adv |= ADVERTISE_10HALF;
2467 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2468 new_adv |= ADVERTISE_10FULL;
2469 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2470 new_adv |= ADVERTISE_100HALF;
2471 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2472 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002473
2474 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2475
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2477
2478 if (tp->link_config.advertising &
2479 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2480 new_adv = 0;
2481 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2482 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2483 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2484 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2485 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2486 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2487 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2488 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2489 MII_TG3_CTRL_ENABLE_AS_MASTER);
2490 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2491 } else {
2492 tg3_writephy(tp, MII_TG3_CTRL, 0);
2493 }
2494 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002495 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2496 new_adv |= ADVERTISE_CSMA;
2497
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 /* Asking for a specific link mode. */
2499 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2501
2502 if (tp->link_config.duplex == DUPLEX_FULL)
2503 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2504 else
2505 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2506 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2507 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2508 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2509 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 if (tp->link_config.speed == SPEED_100) {
2512 if (tp->link_config.duplex == DUPLEX_FULL)
2513 new_adv |= ADVERTISE_100FULL;
2514 else
2515 new_adv |= ADVERTISE_100HALF;
2516 } else {
2517 if (tp->link_config.duplex == DUPLEX_FULL)
2518 new_adv |= ADVERTISE_10FULL;
2519 else
2520 new_adv |= ADVERTISE_10HALF;
2521 }
2522 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002523
2524 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002526
2527 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 }
2529
2530 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2531 tp->link_config.speed != SPEED_INVALID) {
2532 u32 bmcr, orig_bmcr;
2533
2534 tp->link_config.active_speed = tp->link_config.speed;
2535 tp->link_config.active_duplex = tp->link_config.duplex;
2536
2537 bmcr = 0;
2538 switch (tp->link_config.speed) {
2539 default:
2540 case SPEED_10:
2541 break;
2542
2543 case SPEED_100:
2544 bmcr |= BMCR_SPEED100;
2545 break;
2546
2547 case SPEED_1000:
2548 bmcr |= TG3_BMCR_SPEED1000;
2549 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551
2552 if (tp->link_config.duplex == DUPLEX_FULL)
2553 bmcr |= BMCR_FULLDPLX;
2554
2555 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2556 (bmcr != orig_bmcr)) {
2557 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2558 for (i = 0; i < 1500; i++) {
2559 u32 tmp;
2560
2561 udelay(10);
2562 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2563 tg3_readphy(tp, MII_BMSR, &tmp))
2564 continue;
2565 if (!(tmp & BMSR_LSTATUS)) {
2566 udelay(40);
2567 break;
2568 }
2569 }
2570 tg3_writephy(tp, MII_BMCR, bmcr);
2571 udelay(40);
2572 }
2573 } else {
2574 tg3_writephy(tp, MII_BMCR,
2575 BMCR_ANENABLE | BMCR_ANRESTART);
2576 }
2577}
2578
2579static int tg3_init_5401phy_dsp(struct tg3 *tp)
2580{
2581 int err;
2582
2583 /* Turn off tap power management. */
2584 /* Set Extended packet length bit */
2585 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2586
2587 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2588 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2589
2590 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2591 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2592
2593 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2594 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2595
2596 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2597 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2598
2599 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2600 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2601
2602 udelay(40);
2603
2604 return err;
2605}
2606
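/* Return 1 only if every mode in @mask is already present in the PHY's
 * advertisement registers (MII_ADVERTISE and, for gigabit modes,
 * MII_TG3_CTRL).
 */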
Michael Chan3600d912006-12-07 00:21:48 -08002607static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608{
Michael Chan3600d912006-12-07 00:21:48 -08002609 u32 adv_reg, all_mask = 0;
2610
2611 if (mask & ADVERTISED_10baseT_Half)
2612 all_mask |= ADVERTISE_10HALF;
2613 if (mask & ADVERTISED_10baseT_Full)
2614 all_mask |= ADVERTISE_10FULL;
2615 if (mask & ADVERTISED_100baseT_Half)
2616 all_mask |= ADVERTISE_100HALF;
2617 if (mask & ADVERTISED_100baseT_Full)
2618 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619
2620 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2621 return 0;
2622
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 if ((adv_reg & all_mask) != all_mask)
2624 return 0;
2625 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2626 u32 tg3_ctrl;
2627
Michael Chan3600d912006-12-07 00:21:48 -08002628 all_mask = 0;
2629 if (mask & ADVERTISED_1000baseT_Half)
2630 all_mask |= ADVERTISE_1000HALF;
2631 if (mask & ADVERTISED_1000baseT_Full)
2632 all_mask |= ADVERTISE_1000FULL;
2633
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2635 return 0;
2636
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 if ((tg3_ctrl & all_mask) != all_mask)
2638 return 0;
2639 }
2640 return 1;
2641}
2642
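/* Check that the advertised pause bits match link_config.flowctrl.  On a
 * full-duplex link a mismatch returns 0 so the caller renegotiates;
 * otherwise the advertisement register is quietly corrected for the next
 * negotiation and 1 is returned.
 */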
Matt Carlsonef167e22007-12-20 20:10:01 -08002643static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2644{
2645 u32 curadv, reqadv;
2646
2647 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2648 return 1;
2649
2650 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2651 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2652
2653 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2654 if (curadv != reqadv)
2655 return 0;
2656
2657 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2658 tg3_readphy(tp, MII_LPA, rmtadv);
2659 } else {
2660 /* Reprogram the advertisement register, even if it
2661 * does not affect the current link. If the link
2662 * gets renegotiated in the future, we can save an
2663 * additional renegotiation cycle by advertising
2664 * it correctly in the first place.
2665 */
2666 if (curadv != reqadv) {
2667 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2668 ADVERTISE_PAUSE_ASYM);
2669 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2670 }
2671 }
2672
2673 return 1;
2674}
2675
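/* Bring up, or re-validate, the link on copper PHYs: apply the 5401 and
 * 5701 errata, poll BMSR/AUX_STAT for the negotiated speed and duplex,
 * verify the autoneg and flow-control results, then program MAC_MODE and
 * report carrier changes.
 */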
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2677{
2678 int current_link_up;
2679 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002680 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 u16 current_speed;
2682 u8 current_duplex;
2683 int i, err;
2684
2685 tw32(MAC_EVENT, 0);
2686
2687 tw32_f(MAC_STATUS,
2688 (MAC_STATUS_SYNC_CHANGED |
2689 MAC_STATUS_CFG_CHANGED |
2690 MAC_STATUS_MI_COMPLETION |
2691 MAC_STATUS_LNKSTATE_CHANGED));
2692 udelay(40);
2693
Matt Carlson8ef21422008-05-02 16:47:53 -07002694 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2695 tw32_f(MAC_MI_MODE,
2696 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2697 udelay(80);
2698 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2701
2702 /* Some third-party PHYs need to be reset on link going
2703 * down.
2704 */
2705 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2707 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2708 netif_carrier_ok(tp->dev)) {
2709 tg3_readphy(tp, MII_BMSR, &bmsr);
2710 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2711 !(bmsr & BMSR_LSTATUS))
2712 force_reset = 1;
2713 }
2714 if (force_reset)
2715 tg3_phy_reset(tp);
2716
2717 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2718 tg3_readphy(tp, MII_BMSR, &bmsr);
2719 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2720 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2721 bmsr = 0;
2722
2723 if (!(bmsr & BMSR_LSTATUS)) {
2724 err = tg3_init_5401phy_dsp(tp);
2725 if (err)
2726 return err;
2727
2728 tg3_readphy(tp, MII_BMSR, &bmsr);
2729 for (i = 0; i < 1000; i++) {
2730 udelay(10);
2731 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2732 (bmsr & BMSR_LSTATUS)) {
2733 udelay(40);
2734 break;
2735 }
2736 }
2737
2738 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2739 !(bmsr & BMSR_LSTATUS) &&
2740 tp->link_config.active_speed == SPEED_1000) {
2741 err = tg3_phy_reset(tp);
2742 if (!err)
2743 err = tg3_init_5401phy_dsp(tp);
2744 if (err)
2745 return err;
2746 }
2747 }
2748 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2749 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2750 /* 5701 {A0,B0} CRC bug workaround */
2751 tg3_writephy(tp, 0x15, 0x0a75);
2752 tg3_writephy(tp, 0x1c, 0x8c68);
2753 tg3_writephy(tp, 0x1c, 0x8d68);
2754 tg3_writephy(tp, 0x1c, 0x8c68);
2755 }
2756
2757 /* Clear pending interrupts... */
2758 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2759 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2760
2761 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2762 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002763 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2765
2766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2768 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2769 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2770 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2771 else
2772 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2773 }
2774
2775 current_link_up = 0;
2776 current_speed = SPEED_INVALID;
2777 current_duplex = DUPLEX_INVALID;
2778
2779 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2780 u32 val;
2781
2782 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2783 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2784 if (!(val & (1 << 10))) {
2785 val |= (1 << 10);
2786 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2787 goto relink;
2788 }
2789 }
2790
2791 bmsr = 0;
2792 for (i = 0; i < 100; i++) {
2793 tg3_readphy(tp, MII_BMSR, &bmsr);
2794 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2795 (bmsr & BMSR_LSTATUS))
2796 break;
2797 udelay(40);
2798 }
2799
2800 if (bmsr & BMSR_LSTATUS) {
2801 u32 aux_stat, bmcr;
2802
2803 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2804 for (i = 0; i < 2000; i++) {
2805 udelay(10);
2806 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2807 aux_stat)
2808 break;
2809 }
2810
2811 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2812 &current_speed,
2813 &current_duplex);
2814
2815 bmcr = 0;
2816 for (i = 0; i < 200; i++) {
2817 tg3_readphy(tp, MII_BMCR, &bmcr);
2818 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2819 continue;
2820 if (bmcr && bmcr != 0x7fff)
2821 break;
2822 udelay(10);
2823 }
2824
Matt Carlsonef167e22007-12-20 20:10:01 -08002825 lcl_adv = 0;
2826 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827
Matt Carlsonef167e22007-12-20 20:10:01 -08002828 tp->link_config.active_speed = current_speed;
2829 tp->link_config.active_duplex = current_duplex;
2830
2831 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2832 if ((bmcr & BMCR_ANENABLE) &&
2833 tg3_copper_is_advertising_all(tp,
2834 tp->link_config.advertising)) {
2835 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2836 &rmt_adv))
2837 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 }
2839 } else {
2840 if (!(bmcr & BMCR_ANENABLE) &&
2841 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002842 tp->link_config.duplex == current_duplex &&
2843 tp->link_config.flowctrl ==
2844 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 }
2847 }
2848
Matt Carlsonef167e22007-12-20 20:10:01 -08002849 if (current_link_up == 1 &&
2850 tp->link_config.active_duplex == DUPLEX_FULL)
2851 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
2853
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854relink:
Michael Chan6921d202005-12-13 21:15:53 -08002855 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 u32 tmp;
2857
2858 tg3_phy_copper_begin(tp);
2859
2860 tg3_readphy(tp, MII_BMSR, &tmp);
2861 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2862 (tmp & BMSR_LSTATUS))
2863 current_link_up = 1;
2864 }
2865
2866 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2867 if (current_link_up == 1) {
2868 if (tp->link_config.active_speed == SPEED_100 ||
2869 tp->link_config.active_speed == SPEED_10)
2870 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2871 else
2872 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2873 } else
2874 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2875
2876 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2877 if (tp->link_config.active_duplex == DUPLEX_HALF)
2878 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2879
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002881 if (current_link_up == 1 &&
2882 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002884 else
2885 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 }
2887
2888 /* ??? Without this setting Netgear GA302T PHY does not
2889 * ??? send/receive packets...
2890 */
2891 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2892 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2893 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2894 tw32_f(MAC_MI_MODE, tp->mi_mode);
2895 udelay(80);
2896 }
2897
2898 tw32_f(MAC_MODE, tp->mac_mode);
2899 udelay(40);
2900
2901 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2902 /* Polled via timer. */
2903 tw32_f(MAC_EVENT, 0);
2904 } else {
2905 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2906 }
2907 udelay(40);
2908
2909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2910 current_link_up == 1 &&
2911 tp->link_config.active_speed == SPEED_1000 &&
2912 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2913 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2914 udelay(120);
2915 tw32_f(MAC_STATUS,
2916 (MAC_STATUS_SYNC_CHANGED |
2917 MAC_STATUS_CFG_CHANGED));
2918 udelay(40);
2919 tg3_write_mem(tp,
2920 NIC_SRAM_FIRMWARE_MBOX,
2921 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2922 }
2923
2924 if (current_link_up != netif_carrier_ok(tp->dev)) {
2925 if (current_link_up)
2926 netif_carrier_on(tp->dev);
2927 else
2928 netif_carrier_off(tp->dev);
2929 tg3_link_report(tp);
2930 }
2931
2932 return 0;
2933}
2934
2935struct tg3_fiber_aneginfo {
2936 int state;
2937#define ANEG_STATE_UNKNOWN 0
2938#define ANEG_STATE_AN_ENABLE 1
2939#define ANEG_STATE_RESTART_INIT 2
2940#define ANEG_STATE_RESTART 3
2941#define ANEG_STATE_DISABLE_LINK_OK 4
2942#define ANEG_STATE_ABILITY_DETECT_INIT 5
2943#define ANEG_STATE_ABILITY_DETECT 6
2944#define ANEG_STATE_ACK_DETECT_INIT 7
2945#define ANEG_STATE_ACK_DETECT 8
2946#define ANEG_STATE_COMPLETE_ACK_INIT 9
2947#define ANEG_STATE_COMPLETE_ACK 10
2948#define ANEG_STATE_IDLE_DETECT_INIT 11
2949#define ANEG_STATE_IDLE_DETECT 12
2950#define ANEG_STATE_LINK_OK 13
2951#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2952#define ANEG_STATE_NEXT_PAGE_WAIT 15
2953
2954 u32 flags;
2955#define MR_AN_ENABLE 0x00000001
2956#define MR_RESTART_AN 0x00000002
2957#define MR_AN_COMPLETE 0x00000004
2958#define MR_PAGE_RX 0x00000008
2959#define MR_NP_LOADED 0x00000010
2960#define MR_TOGGLE_TX 0x00000020
2961#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2962#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2963#define MR_LP_ADV_SYM_PAUSE 0x00000100
2964#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2965#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2966#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2967#define MR_LP_ADV_NEXT_PAGE 0x00001000
2968#define MR_TOGGLE_RX 0x00002000
2969#define MR_NP_RX 0x00004000
2970
2971#define MR_LINK_OK 0x80000000
2972
2973 unsigned long link_time, cur_time;
2974
2975 u32 ability_match_cfg;
2976 int ability_match_count;
2977
2978 char ability_match, idle_match, ack_match;
2979
2980 u32 txconfig, rxconfig;
2981#define ANEG_CFG_NP 0x00000080
2982#define ANEG_CFG_ACK 0x00000040
2983#define ANEG_CFG_RF2 0x00000020
2984#define ANEG_CFG_RF1 0x00000010
2985#define ANEG_CFG_PS2 0x00000001
2986#define ANEG_CFG_PS1 0x00008000
2987#define ANEG_CFG_HD 0x00004000
2988#define ANEG_CFG_FD 0x00002000
2989#define ANEG_CFG_INVAL 0x00001f06
2990
2991};
2992#define ANEG_OK 0
2993#define ANEG_DONE 1
2994#define ANEG_TIMER_ENAB 2
2995#define ANEG_FAILED -1
2996
2997#define ANEG_STATE_SETTLE_TIME 10000
2998
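/* Software implementation of the 1000BASE-X autonegotiation handshake
 * used on fiber/TBI ports: it consumes the received config word from
 * MAC_RX_AUTO_NEG, drives the transmitted one through MAC_TX_AUTO_NEG,
 * and steps the ANEG_STATE_* machine on each call.
 */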
2999static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3000 struct tg3_fiber_aneginfo *ap)
3001{
Matt Carlson5be73b42007-12-20 20:09:29 -08003002 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 unsigned long delta;
3004 u32 rx_cfg_reg;
3005 int ret;
3006
3007 if (ap->state == ANEG_STATE_UNKNOWN) {
3008 ap->rxconfig = 0;
3009 ap->link_time = 0;
3010 ap->cur_time = 0;
3011 ap->ability_match_cfg = 0;
3012 ap->ability_match_count = 0;
3013 ap->ability_match = 0;
3014 ap->idle_match = 0;
3015 ap->ack_match = 0;
3016 }
3017 ap->cur_time++;
3018
3019 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3020 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3021
3022 if (rx_cfg_reg != ap->ability_match_cfg) {
3023 ap->ability_match_cfg = rx_cfg_reg;
3024 ap->ability_match = 0;
3025 ap->ability_match_count = 0;
3026 } else {
3027 if (++ap->ability_match_count > 1) {
3028 ap->ability_match = 1;
3029 ap->ability_match_cfg = rx_cfg_reg;
3030 }
3031 }
3032 if (rx_cfg_reg & ANEG_CFG_ACK)
3033 ap->ack_match = 1;
3034 else
3035 ap->ack_match = 0;
3036
3037 ap->idle_match = 0;
3038 } else {
3039 ap->idle_match = 1;
3040 ap->ability_match_cfg = 0;
3041 ap->ability_match_count = 0;
3042 ap->ability_match = 0;
3043 ap->ack_match = 0;
3044
3045 rx_cfg_reg = 0;
3046 }
3047
3048 ap->rxconfig = rx_cfg_reg;
3049 ret = ANEG_OK;
3050
	switch (ap->state) {
3052 case ANEG_STATE_UNKNOWN:
3053 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3054 ap->state = ANEG_STATE_AN_ENABLE;
3055
3056 /* fallthru */
3057 case ANEG_STATE_AN_ENABLE:
3058 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3059 if (ap->flags & MR_AN_ENABLE) {
3060 ap->link_time = 0;
3061 ap->cur_time = 0;
3062 ap->ability_match_cfg = 0;
3063 ap->ability_match_count = 0;
3064 ap->ability_match = 0;
3065 ap->idle_match = 0;
3066 ap->ack_match = 0;
3067
3068 ap->state = ANEG_STATE_RESTART_INIT;
3069 } else {
3070 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3071 }
3072 break;
3073
3074 case ANEG_STATE_RESTART_INIT:
3075 ap->link_time = ap->cur_time;
3076 ap->flags &= ~(MR_NP_LOADED);
3077 ap->txconfig = 0;
3078 tw32(MAC_TX_AUTO_NEG, 0);
3079 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3080 tw32_f(MAC_MODE, tp->mac_mode);
3081 udelay(40);
3082
3083 ret = ANEG_TIMER_ENAB;
3084 ap->state = ANEG_STATE_RESTART;
3085
3086 /* fallthru */
3087 case ANEG_STATE_RESTART:
3088 delta = ap->cur_time - ap->link_time;
3089 if (delta > ANEG_STATE_SETTLE_TIME) {
3090 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3091 } else {
3092 ret = ANEG_TIMER_ENAB;
3093 }
3094 break;
3095
3096 case ANEG_STATE_DISABLE_LINK_OK:
3097 ret = ANEG_DONE;
3098 break;
3099
3100 case ANEG_STATE_ABILITY_DETECT_INIT:
3101 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003102 ap->txconfig = ANEG_CFG_FD;
3103 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3104 if (flowctrl & ADVERTISE_1000XPAUSE)
3105 ap->txconfig |= ANEG_CFG_PS1;
3106 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3107 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3109 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3110 tw32_f(MAC_MODE, tp->mac_mode);
3111 udelay(40);
3112
3113 ap->state = ANEG_STATE_ABILITY_DETECT;
3114 break;
3115
3116 case ANEG_STATE_ABILITY_DETECT:
3117 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3118 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3119 }
3120 break;
3121
3122 case ANEG_STATE_ACK_DETECT_INIT:
3123 ap->txconfig |= ANEG_CFG_ACK;
3124 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3125 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3126 tw32_f(MAC_MODE, tp->mac_mode);
3127 udelay(40);
3128
3129 ap->state = ANEG_STATE_ACK_DETECT;
3130
3131 /* fallthru */
3132 case ANEG_STATE_ACK_DETECT:
3133 if (ap->ack_match != 0) {
3134 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3135 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3136 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3137 } else {
3138 ap->state = ANEG_STATE_AN_ENABLE;
3139 }
3140 } else if (ap->ability_match != 0 &&
3141 ap->rxconfig == 0) {
3142 ap->state = ANEG_STATE_AN_ENABLE;
3143 }
3144 break;
3145
3146 case ANEG_STATE_COMPLETE_ACK_INIT:
3147 if (ap->rxconfig & ANEG_CFG_INVAL) {
3148 ret = ANEG_FAILED;
3149 break;
3150 }
3151 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3152 MR_LP_ADV_HALF_DUPLEX |
3153 MR_LP_ADV_SYM_PAUSE |
3154 MR_LP_ADV_ASYM_PAUSE |
3155 MR_LP_ADV_REMOTE_FAULT1 |
3156 MR_LP_ADV_REMOTE_FAULT2 |
3157 MR_LP_ADV_NEXT_PAGE |
3158 MR_TOGGLE_RX |
3159 MR_NP_RX);
3160 if (ap->rxconfig & ANEG_CFG_FD)
3161 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3162 if (ap->rxconfig & ANEG_CFG_HD)
3163 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3164 if (ap->rxconfig & ANEG_CFG_PS1)
3165 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3166 if (ap->rxconfig & ANEG_CFG_PS2)
3167 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3168 if (ap->rxconfig & ANEG_CFG_RF1)
3169 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3170 if (ap->rxconfig & ANEG_CFG_RF2)
3171 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3172 if (ap->rxconfig & ANEG_CFG_NP)
3173 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3174
3175 ap->link_time = ap->cur_time;
3176
3177 ap->flags ^= (MR_TOGGLE_TX);
3178 if (ap->rxconfig & 0x0008)
3179 ap->flags |= MR_TOGGLE_RX;
3180 if (ap->rxconfig & ANEG_CFG_NP)
3181 ap->flags |= MR_NP_RX;
3182 ap->flags |= MR_PAGE_RX;
3183
3184 ap->state = ANEG_STATE_COMPLETE_ACK;
3185 ret = ANEG_TIMER_ENAB;
3186 break;
3187
3188 case ANEG_STATE_COMPLETE_ACK:
3189 if (ap->ability_match != 0 &&
3190 ap->rxconfig == 0) {
3191 ap->state = ANEG_STATE_AN_ENABLE;
3192 break;
3193 }
3194 delta = ap->cur_time - ap->link_time;
3195 if (delta > ANEG_STATE_SETTLE_TIME) {
3196 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3197 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3198 } else {
3199 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3200 !(ap->flags & MR_NP_RX)) {
3201 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3202 } else {
3203 ret = ANEG_FAILED;
3204 }
3205 }
3206 }
3207 break;
3208
3209 case ANEG_STATE_IDLE_DETECT_INIT:
3210 ap->link_time = ap->cur_time;
3211 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3212 tw32_f(MAC_MODE, tp->mac_mode);
3213 udelay(40);
3214
3215 ap->state = ANEG_STATE_IDLE_DETECT;
3216 ret = ANEG_TIMER_ENAB;
3217 break;
3218
3219 case ANEG_STATE_IDLE_DETECT:
3220 if (ap->ability_match != 0 &&
3221 ap->rxconfig == 0) {
3222 ap->state = ANEG_STATE_AN_ENABLE;
3223 break;
3224 }
3225 delta = ap->cur_time - ap->link_time;
3226 if (delta > ANEG_STATE_SETTLE_TIME) {
3227 /* XXX another gem from the Broadcom driver :( */
3228 ap->state = ANEG_STATE_LINK_OK;
3229 }
3230 break;
3231
3232 case ANEG_STATE_LINK_OK:
3233 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3234 ret = ANEG_DONE;
3235 break;
3236
3237 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3238 /* ??? unimplemented */
3239 break;
3240
3241 case ANEG_STATE_NEXT_PAGE_WAIT:
3242 /* ??? unimplemented */
3243 break;
3244
3245 default:
3246 ret = ANEG_FAILED;
3247 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249
3250 return ret;
3251}
3252
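/* Drive the software autoneg state machine to completion, ticking it
 * roughly once per microsecond for up to ~195 ms.  txflags/rxflags
 * return the transmitted config word and the resolved MR_* flags;
 * the return value is nonzero on success.
 */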
Matt Carlson5be73b42007-12-20 20:09:29 -08003253static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254{
3255 int res = 0;
3256 struct tg3_fiber_aneginfo aninfo;
3257 int status = ANEG_FAILED;
3258 unsigned int tick;
3259 u32 tmp;
3260
3261 tw32_f(MAC_TX_AUTO_NEG, 0);
3262
3263 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3264 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3265 udelay(40);
3266
3267 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3268 udelay(40);
3269
3270 memset(&aninfo, 0, sizeof(aninfo));
3271 aninfo.flags |= MR_AN_ENABLE;
3272 aninfo.state = ANEG_STATE_UNKNOWN;
3273 aninfo.cur_time = 0;
3274 tick = 0;
3275 while (++tick < 195000) {
3276 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3277 if (status == ANEG_DONE || status == ANEG_FAILED)
3278 break;
3279
3280 udelay(1);
3281 }
3282
3283 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3284 tw32_f(MAC_MODE, tp->mac_mode);
3285 udelay(40);
3286
Matt Carlson5be73b42007-12-20 20:09:29 -08003287 *txflags = aninfo.txconfig;
3288 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
3290 if (status == ANEG_DONE &&
3291 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3292 MR_LP_ADV_FULL_DUPLEX)))
3293 res = 1;
3294
3295 return res;
3296}
3297
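/* Bring up the BCM8002 SerDes PHY: program the PLL lock range, issue
 * a software reset, cycle POR and wait for the signal to stabilize.
 * Only runs on the first initialization or when the PCS reports sync.
 */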
3298static void tg3_init_bcm8002(struct tg3 *tp)
3299{
3300 u32 mac_status = tr32(MAC_STATUS);
3301 int i;
3302
 3303	/* Reset when initializing for the first time or when we have a link. */
3304 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3305 !(mac_status & MAC_STATUS_PCS_SYNCED))
3306 return;
3307
3308 /* Set PLL lock range. */
3309 tg3_writephy(tp, 0x16, 0x8007);
3310
3311 /* SW reset */
3312 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3313
3314 /* Wait for reset to complete. */
3315 /* XXX schedule_timeout() ... */
3316 for (i = 0; i < 500; i++)
3317 udelay(10);
3318
3319 /* Config mode; select PMA/Ch 1 regs. */
3320 tg3_writephy(tp, 0x10, 0x8411);
3321
3322 /* Enable auto-lock and comdet, select txclk for tx. */
3323 tg3_writephy(tp, 0x11, 0x0a10);
3324
3325 tg3_writephy(tp, 0x18, 0x00a0);
3326 tg3_writephy(tp, 0x16, 0x41ff);
3327
3328 /* Assert and deassert POR. */
3329 tg3_writephy(tp, 0x13, 0x0400);
3330 udelay(40);
3331 tg3_writephy(tp, 0x13, 0x0000);
3332
3333 tg3_writephy(tp, 0x11, 0x0a50);
3334 udelay(40);
3335 tg3_writephy(tp, 0x11, 0x0a10);
3336
3337 /* Wait for signal to stabilize */
3338 /* XXX schedule_timeout() ... */
3339 for (i = 0; i < 15000; i++)
3340 udelay(10);
3341
3342 /* Deselect the channel register so we can read the PHYID
3343 * later.
3344 */
3345 tg3_writephy(tp, 0x10, 0x8011);
3346}
3347
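/* Link setup for fiber ports that use the hardware SG_DIG autoneg
 * block.  Programs the expected SG_DIG_CTRL value (including pause
 * advertisement), applies the serdes_cfg workaround where required,
 * resolves flow control from the negotiated bits, and falls back to
 * parallel detection when the partner sends no config words.
 * Returns nonzero if the link is up.
 */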
3348static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3349{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003350 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 u32 sg_dig_ctrl, sg_dig_status;
3352 u32 serdes_cfg, expected_sg_dig_ctrl;
3353 int workaround, port_a;
3354 int current_link_up;
3355
3356 serdes_cfg = 0;
3357 expected_sg_dig_ctrl = 0;
3358 workaround = 0;
3359 port_a = 1;
3360 current_link_up = 0;
3361
3362 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3363 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3364 workaround = 1;
3365 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3366 port_a = 0;
3367
3368 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3369 /* preserve bits 20-23 for voltage regulator */
3370 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3371 }
3372
3373 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3374
3375 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003376 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 if (workaround) {
3378 u32 val = serdes_cfg;
3379
3380 if (port_a)
3381 val |= 0xc010000;
3382 else
3383 val |= 0x4010000;
3384 tw32_f(MAC_SERDES_CFG, val);
3385 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003386
3387 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 }
3389 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3390 tg3_setup_flow_control(tp, 0, 0);
3391 current_link_up = 1;
3392 }
3393 goto out;
3394 }
3395
3396 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003397 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
Matt Carlson82cd3d12007-12-20 20:09:00 -08003399 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3400 if (flowctrl & ADVERTISE_1000XPAUSE)
3401 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3402 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3403 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404
3405 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003406 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3407 tp->serdes_counter &&
3408 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3409 MAC_STATUS_RCVD_CFG)) ==
3410 MAC_STATUS_PCS_SYNCED)) {
3411 tp->serdes_counter--;
3412 current_link_up = 1;
3413 goto out;
3414 }
3415restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 if (workaround)
3417 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003418 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 udelay(5);
3420 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3421
Michael Chan3d3ebe72006-09-27 15:59:15 -07003422 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3423 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3425 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003426 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 mac_status = tr32(MAC_STATUS);
3428
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003429 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003431 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432
Matt Carlson82cd3d12007-12-20 20:09:00 -08003433 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3434 local_adv |= ADVERTISE_1000XPAUSE;
3435 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3436 local_adv |= ADVERTISE_1000XPSE_ASYM;
3437
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003438 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003439 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003440 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003441 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442
3443 tg3_setup_flow_control(tp, local_adv, remote_adv);
3444 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003445 tp->serdes_counter = 0;
3446 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003447 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003448 if (tp->serdes_counter)
3449 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 else {
3451 if (workaround) {
3452 u32 val = serdes_cfg;
3453
3454 if (port_a)
3455 val |= 0xc010000;
3456 else
3457 val |= 0x4010000;
3458
3459 tw32_f(MAC_SERDES_CFG, val);
3460 }
3461
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003462 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 udelay(40);
3464
 3465				/* Link parallel detection: the link is up only
 3466				 * if we have PCS_SYNC and are not receiving
 3467				 * config code words. */
3468 mac_status = tr32(MAC_STATUS);
3469 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3470 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3471 tg3_setup_flow_control(tp, 0, 0);
3472 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003473 tp->tg3_flags2 |=
3474 TG3_FLG2_PARALLEL_DETECT;
3475 tp->serdes_counter =
3476 SERDES_PARALLEL_DET_TIMEOUT;
3477 } else
3478 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 }
3480 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003481 } else {
3482 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3483 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 }
3485
3486out:
3487 return current_link_up;
3488}
3489
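/* Link setup for fiber ports driven entirely by the MAC: run the
 * software autoneg state machine when autoneg is enabled, otherwise
 * force a 1000FD link, then program flow control and wait for the
 * MAC status bits to settle.  Returns nonzero if the link is up.
 */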
3490static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3491{
3492 int current_link_up = 0;
3493
Michael Chan5cf64b82007-05-05 12:11:21 -07003494 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496
3497 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003498 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003500
Matt Carlson5be73b42007-12-20 20:09:29 -08003501 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3502 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503
Matt Carlson5be73b42007-12-20 20:09:29 -08003504 if (txflags & ANEG_CFG_PS1)
3505 local_adv |= ADVERTISE_1000XPAUSE;
3506 if (txflags & ANEG_CFG_PS2)
3507 local_adv |= ADVERTISE_1000XPSE_ASYM;
3508
3509 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3510 remote_adv |= LPA_1000XPAUSE;
3511 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3512 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513
3514 tg3_setup_flow_control(tp, local_adv, remote_adv);
3515
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516 current_link_up = 1;
3517 }
3518 for (i = 0; i < 30; i++) {
3519 udelay(20);
3520 tw32_f(MAC_STATUS,
3521 (MAC_STATUS_SYNC_CHANGED |
3522 MAC_STATUS_CFG_CHANGED));
3523 udelay(40);
3524 if ((tr32(MAC_STATUS) &
3525 (MAC_STATUS_SYNC_CHANGED |
3526 MAC_STATUS_CFG_CHANGED)) == 0)
3527 break;
3528 }
3529
3530 mac_status = tr32(MAC_STATUS);
3531 if (current_link_up == 0 &&
3532 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3533 !(mac_status & MAC_STATUS_RCVD_CFG))
3534 current_link_up = 1;
3535 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003536 tg3_setup_flow_control(tp, 0, 0);
3537
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 /* Forcing 1000FD link up. */
3539 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540
3541 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3542 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003543
3544 tw32_f(MAC_MODE, tp->mac_mode);
3545 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 }
3547
3548out:
3549 return current_link_up;
3550}
3551
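/* Top-level link setup for TBI/fiber ports.  Puts the MAC into TBI
 * mode, runs either the hardware (SG_DIG) or the by-hand autoneg
 * path, then updates carrier state, LEDs and the recorded
 * speed/duplex, reporting any link change.
 */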
3552static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3553{
3554 u32 orig_pause_cfg;
3555 u16 orig_active_speed;
3556 u8 orig_active_duplex;
3557 u32 mac_status;
3558 int current_link_up;
3559 int i;
3560
Matt Carlson8d018622007-12-20 20:05:44 -08003561 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 orig_active_speed = tp->link_config.active_speed;
3563 orig_active_duplex = tp->link_config.active_duplex;
3564
3565 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3566 netif_carrier_ok(tp->dev) &&
3567 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3568 mac_status = tr32(MAC_STATUS);
3569 mac_status &= (MAC_STATUS_PCS_SYNCED |
3570 MAC_STATUS_SIGNAL_DET |
3571 MAC_STATUS_CFG_CHANGED |
3572 MAC_STATUS_RCVD_CFG);
3573 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3574 MAC_STATUS_SIGNAL_DET)) {
3575 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3576 MAC_STATUS_CFG_CHANGED));
3577 return 0;
3578 }
3579 }
3580
3581 tw32_f(MAC_TX_AUTO_NEG, 0);
3582
3583 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3584 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3585 tw32_f(MAC_MODE, tp->mac_mode);
3586 udelay(40);
3587
3588 if (tp->phy_id == PHY_ID_BCM8002)
3589 tg3_init_bcm8002(tp);
3590
3591 /* Enable link change event even when serdes polling. */
3592 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3593 udelay(40);
3594
3595 current_link_up = 0;
3596 mac_status = tr32(MAC_STATUS);
3597
3598 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3599 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3600 else
3601 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3602
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 tp->hw_status->status =
3604 (SD_STATUS_UPDATED |
3605 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3606
3607 for (i = 0; i < 100; i++) {
3608 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3609 MAC_STATUS_CFG_CHANGED));
3610 udelay(5);
3611 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003612 MAC_STATUS_CFG_CHANGED |
3613 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 break;
3615 }
3616
3617 mac_status = tr32(MAC_STATUS);
3618 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3619 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003620 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3621 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622 tw32_f(MAC_MODE, (tp->mac_mode |
3623 MAC_MODE_SEND_CONFIGS));
3624 udelay(1);
3625 tw32_f(MAC_MODE, tp->mac_mode);
3626 }
3627 }
3628
3629 if (current_link_up == 1) {
3630 tp->link_config.active_speed = SPEED_1000;
3631 tp->link_config.active_duplex = DUPLEX_FULL;
3632 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3633 LED_CTRL_LNKLED_OVERRIDE |
3634 LED_CTRL_1000MBPS_ON));
3635 } else {
3636 tp->link_config.active_speed = SPEED_INVALID;
3637 tp->link_config.active_duplex = DUPLEX_INVALID;
3638 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3639 LED_CTRL_LNKLED_OVERRIDE |
3640 LED_CTRL_TRAFFIC_OVERRIDE));
3641 }
3642
3643 if (current_link_up != netif_carrier_ok(tp->dev)) {
3644 if (current_link_up)
3645 netif_carrier_on(tp->dev);
3646 else
3647 netif_carrier_off(tp->dev);
3648 tg3_link_report(tp);
3649 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003650 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 if (orig_pause_cfg != now_pause_cfg ||
3652 orig_active_speed != tp->link_config.active_speed ||
3653 orig_active_duplex != tp->link_config.active_duplex)
3654 tg3_link_report(tp);
3655 }
3656
3657 return 0;
3658}
3659
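/* Link setup for SerDes devices managed through an MII register set
 * (5714S-style).  Mirrors the copper path: advertise the 1000X
 * abilities, restart or force autoneg via BMCR, then derive duplex,
 * flow control and carrier state from BMSR and the advertisement
 * registers.
 */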
Michael Chan747e8f82005-07-25 12:33:22 -07003660static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3661{
3662 int current_link_up, err = 0;
3663 u32 bmsr, bmcr;
3664 u16 current_speed;
3665 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003666 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003667
3668 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3669 tw32_f(MAC_MODE, tp->mac_mode);
3670 udelay(40);
3671
3672 tw32(MAC_EVENT, 0);
3673
3674 tw32_f(MAC_STATUS,
3675 (MAC_STATUS_SYNC_CHANGED |
3676 MAC_STATUS_CFG_CHANGED |
3677 MAC_STATUS_MI_COMPLETION |
3678 MAC_STATUS_LNKSTATE_CHANGED));
3679 udelay(40);
3680
3681 if (force_reset)
3682 tg3_phy_reset(tp);
3683
3684 current_link_up = 0;
3685 current_speed = SPEED_INVALID;
3686 current_duplex = DUPLEX_INVALID;
3687
3688 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3689 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3691 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3692 bmsr |= BMSR_LSTATUS;
3693 else
3694 bmsr &= ~BMSR_LSTATUS;
3695 }
Michael Chan747e8f82005-07-25 12:33:22 -07003696
3697 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3698
3699 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003700 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003701 /* do nothing, just check for link up at the end */
3702 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3703 u32 adv, new_adv;
3704
3705 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3706 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3707 ADVERTISE_1000XPAUSE |
3708 ADVERTISE_1000XPSE_ASYM |
3709 ADVERTISE_SLCT);
3710
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003711 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003712
3713 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3714 new_adv |= ADVERTISE_1000XHALF;
3715 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3716 new_adv |= ADVERTISE_1000XFULL;
3717
3718 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3719 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3720 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3721 tg3_writephy(tp, MII_BMCR, bmcr);
3722
3723 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003724 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003725 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3726
3727 return err;
3728 }
3729 } else {
3730 u32 new_bmcr;
3731
3732 bmcr &= ~BMCR_SPEED1000;
3733 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3734
3735 if (tp->link_config.duplex == DUPLEX_FULL)
3736 new_bmcr |= BMCR_FULLDPLX;
3737
3738 if (new_bmcr != bmcr) {
3739 /* BMCR_SPEED1000 is a reserved bit that needs
3740 * to be set on write.
3741 */
3742 new_bmcr |= BMCR_SPEED1000;
3743
3744 /* Force a linkdown */
3745 if (netif_carrier_ok(tp->dev)) {
3746 u32 adv;
3747
3748 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3749 adv &= ~(ADVERTISE_1000XFULL |
3750 ADVERTISE_1000XHALF |
3751 ADVERTISE_SLCT);
3752 tg3_writephy(tp, MII_ADVERTISE, adv);
3753 tg3_writephy(tp, MII_BMCR, bmcr |
3754 BMCR_ANRESTART |
3755 BMCR_ANENABLE);
3756 udelay(10);
3757 netif_carrier_off(tp->dev);
3758 }
3759 tg3_writephy(tp, MII_BMCR, new_bmcr);
3760 bmcr = new_bmcr;
3761 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3762 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003763 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3764 ASIC_REV_5714) {
3765 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3766 bmsr |= BMSR_LSTATUS;
3767 else
3768 bmsr &= ~BMSR_LSTATUS;
3769 }
Michael Chan747e8f82005-07-25 12:33:22 -07003770 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3771 }
3772 }
3773
3774 if (bmsr & BMSR_LSTATUS) {
3775 current_speed = SPEED_1000;
3776 current_link_up = 1;
3777 if (bmcr & BMCR_FULLDPLX)
3778 current_duplex = DUPLEX_FULL;
3779 else
3780 current_duplex = DUPLEX_HALF;
3781
Matt Carlsonef167e22007-12-20 20:10:01 -08003782 local_adv = 0;
3783 remote_adv = 0;
3784
Michael Chan747e8f82005-07-25 12:33:22 -07003785 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003786 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003787
3788 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3789 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3790 common = local_adv & remote_adv;
3791 if (common & (ADVERTISE_1000XHALF |
3792 ADVERTISE_1000XFULL)) {
3793 if (common & ADVERTISE_1000XFULL)
3794 current_duplex = DUPLEX_FULL;
3795 else
3796 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003797 }
3798 else
3799 current_link_up = 0;
3800 }
3801 }
3802
Matt Carlsonef167e22007-12-20 20:10:01 -08003803 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3804 tg3_setup_flow_control(tp, local_adv, remote_adv);
3805
Michael Chan747e8f82005-07-25 12:33:22 -07003806 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3807 if (tp->link_config.active_duplex == DUPLEX_HALF)
3808 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3809
3810 tw32_f(MAC_MODE, tp->mac_mode);
3811 udelay(40);
3812
3813 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3814
3815 tp->link_config.active_speed = current_speed;
3816 tp->link_config.active_duplex = current_duplex;
3817
3818 if (current_link_up != netif_carrier_ok(tp->dev)) {
3819 if (current_link_up)
3820 netif_carrier_on(tp->dev);
3821 else {
3822 netif_carrier_off(tp->dev);
3823 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3824 }
3825 tg3_link_report(tp);
3826 }
3827 return err;
3828}
3829
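/* Periodic parallel-detection check for MII SerDes links.  If autoneg
 * has not brought the link up but we have signal detect and no
 * incoming config words, force 1000FD; if config words reappear while
 * in parallel-detect mode, turn autoneg back on.
 */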
3830static void tg3_serdes_parallel_detect(struct tg3 *tp)
3831{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003832 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003833 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003834 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003835 return;
3836 }
3837 if (!netif_carrier_ok(tp->dev) &&
3838 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3839 u32 bmcr;
3840
3841 tg3_readphy(tp, MII_BMCR, &bmcr);
3842 if (bmcr & BMCR_ANENABLE) {
3843 u32 phy1, phy2;
3844
3845 /* Select shadow register 0x1f */
3846 tg3_writephy(tp, 0x1c, 0x7c00);
3847 tg3_readphy(tp, 0x1c, &phy1);
3848
3849 /* Select expansion interrupt status register */
3850 tg3_writephy(tp, 0x17, 0x0f01);
3851 tg3_readphy(tp, 0x15, &phy2);
3852 tg3_readphy(tp, 0x15, &phy2);
3853
3854 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3855 /* We have signal detect and not receiving
3856 * config code words, link is up by parallel
3857 * detection.
3858 */
3859
3860 bmcr &= ~BMCR_ANENABLE;
3861 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3862 tg3_writephy(tp, MII_BMCR, bmcr);
3863 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3864 }
3865 }
3866 }
3867 else if (netif_carrier_ok(tp->dev) &&
3868 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3869 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3870 u32 phy2;
3871
3872 /* Select expansion interrupt status register */
3873 tg3_writephy(tp, 0x17, 0x0f01);
3874 tg3_readphy(tp, 0x15, &phy2);
3875 if (phy2 & 0x20) {
3876 u32 bmcr;
3877
3878 /* Config code words received, turn on autoneg. */
3879 tg3_readphy(tp, MII_BMCR, &bmcr);
3880 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3881
3882 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3883
3884 }
3885 }
3886}
3887
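/* Dispatch link setup to the copper, fiber or MII-SerDes handler,
 * then apply link-dependent MAC settings: the 5784_AX clock
 * prescaler, TX inter-packet gap and slot time, statistics
 * coalescing ticks and the ASPM L1 threshold workaround.
 */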
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3889{
3890 int err;
3891
3892 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3893 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003894 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3895 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 } else {
3897 err = tg3_setup_copper_phy(tp, force_reset);
3898 }
3899
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003900 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003901 u32 val, scale;
3902
3903 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3904 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3905 scale = 65;
3906 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3907 scale = 6;
3908 else
3909 scale = 12;
3910
3911 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3912 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3913 tw32(GRC_MISC_CFG, val);
3914 }
3915
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 if (tp->link_config.active_speed == SPEED_1000 &&
3917 tp->link_config.active_duplex == DUPLEX_HALF)
3918 tw32(MAC_TX_LENGTHS,
3919 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3920 (6 << TX_LENGTHS_IPG_SHIFT) |
3921 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3922 else
3923 tw32(MAC_TX_LENGTHS,
3924 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3925 (6 << TX_LENGTHS_IPG_SHIFT) |
3926 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3927
3928 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3929 if (netif_carrier_ok(tp->dev)) {
3930 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003931 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 } else {
3933 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3934 }
3935 }
3936
Matt Carlson8ed5d972007-05-07 00:25:49 -07003937 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3938 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3939 if (!netif_carrier_ok(tp->dev))
3940 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3941 tp->pwrmgmt_thresh;
3942 else
3943 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3944 tw32(PCIE_PWR_MGMT_THRESH, val);
3945 }
3946
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 return err;
3948}
3949
Michael Chandf3e6542006-05-26 17:48:07 -07003950/* This is called whenever we suspect that the system chipset is re-
3951 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3952 * is bogus tx completions. We try to recover by setting the
3953 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3954 * in the workqueue.
3955 */
3956static void tg3_tx_recover(struct tg3 *tp)
3957{
3958 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3959 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3960
3961 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3962 "mapped I/O cycles to the network device, attempting to "
3963 "recover. Please report the problem to the driver maintainer "
3964 "and include system chipset information.\n", tp->dev->name);
3965
3966 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003967 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003968 spin_unlock(&tp->lock);
3969}
3970
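/* Number of free TX descriptors.  The barrier pairs with the one in
 * tg3_tx() so that a stale tx_cons is not used when deciding whether
 * the queue has room.
 */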
Michael Chan1b2a7202006-08-07 21:46:02 -07003971static inline u32 tg3_tx_avail(struct tg3 *tp)
3972{
3973 smp_mb();
3974 return (tp->tx_pending -
3975 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3976}
3977
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978/* Tigon3 never reports partial packet sends. So we do not
3979 * need special logic to handle SKBs that have not had all
3980 * of their frags sent yet, like SunGEM does.
3981 */
3982static void tg3_tx(struct tg3 *tp)
3983{
3984 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3985 u32 sw_idx = tp->tx_cons;
3986
3987 while (sw_idx != hw_idx) {
3988 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3989 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003990 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991
Michael Chandf3e6542006-05-26 17:48:07 -07003992 if (unlikely(skb == NULL)) {
3993 tg3_tx_recover(tp);
3994 return;
3995 }
3996
David S. Miller90079ce2008-09-11 04:52:51 -07003997 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998
3999 ri->skb = NULL;
4000
4001 sw_idx = NEXT_TX(sw_idx);
4002
4003 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07004005 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4006 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 sw_idx = NEXT_TX(sw_idx);
4008 }
4009
David S. Millerf47c11e2005-06-24 20:18:35 -07004010 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07004011
4012 if (unlikely(tx_bug)) {
4013 tg3_tx_recover(tp);
4014 return;
4015 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016 }
4017
4018 tp->tx_cons = sw_idx;
4019
Michael Chan1b2a7202006-08-07 21:46:02 -07004020 /* Need to make the tx_cons update visible to tg3_start_xmit()
4021 * before checking for netif_queue_stopped(). Without the
4022 * memory barrier, there is a small possibility that tg3_start_xmit()
4023 * will miss it and cause the queue to be stopped forever.
4024 */
4025 smp_mb();
4026
4027 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004028 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07004029 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004030 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004031 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07004032 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07004033 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035}
4036
4037/* Returns size of skb allocated or < 0 on error.
4038 *
4039 * We only need to fill in the address because the other members
4040 * of the RX descriptor are invariant, see tg3_init_rings.
4041 *
 4042 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4043 * posting buffers we only dirty the first cache line of the RX
4044 * descriptor (containing the address). Whereas for the RX status
4045 * buffers the cpu only reads the last cacheline of the RX descriptor
4046 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4047 */
4048static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4049 int src_idx, u32 dest_idx_unmasked)
4050{
4051 struct tg3_rx_buffer_desc *desc;
4052 struct ring_info *map, *src_map;
4053 struct sk_buff *skb;
4054 dma_addr_t mapping;
4055 int skb_size, dest_idx;
4056
4057 src_map = NULL;
4058 switch (opaque_key) {
4059 case RXD_OPAQUE_RING_STD:
4060 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4061 desc = &tp->rx_std[dest_idx];
4062 map = &tp->rx_std_buffers[dest_idx];
4063 if (src_idx >= 0)
4064 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07004065 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066 break;
4067
4068 case RXD_OPAQUE_RING_JUMBO:
4069 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4070 desc = &tp->rx_jumbo[dest_idx];
4071 map = &tp->rx_jumbo_buffers[dest_idx];
4072 if (src_idx >= 0)
4073 src_map = &tp->rx_jumbo_buffers[src_idx];
4074 skb_size = RX_JUMBO_PKT_BUF_SZ;
4075 break;
4076
4077 default:
4078 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004079 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080
4081 /* Do not overwrite any of the map or rp information
4082 * until we are sure we can commit to a new buffer.
4083 *
4084 * Callers depend upon this behavior and assume that
4085 * we leave everything unchanged if we fail.
4086 */
David S. Millera20e9c62006-07-31 22:38:16 -07004087 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 if (skb == NULL)
4089 return -ENOMEM;
4090
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091 skb_reserve(skb, tp->rx_offset);
4092
4093 mapping = pci_map_single(tp->pdev, skb->data,
4094 skb_size - tp->rx_offset,
4095 PCI_DMA_FROMDEVICE);
4096
4097 map->skb = skb;
4098 pci_unmap_addr_set(map, mapping, mapping);
4099
4100 if (src_map != NULL)
4101 src_map->skb = NULL;
4102
4103 desc->addr_hi = ((u64)mapping >> 32);
4104 desc->addr_lo = ((u64)mapping & 0xffffffff);
4105
4106 return skb_size;
4107}
4108
 4109/* We only need to copy over the address because the other
4110 * members of the RX descriptor are invariant. See notes above
4111 * tg3_alloc_rx_skb for full details.
4112 */
4113static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4114 int src_idx, u32 dest_idx_unmasked)
4115{
4116 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4117 struct ring_info *src_map, *dest_map;
4118 int dest_idx;
4119
4120 switch (opaque_key) {
4121 case RXD_OPAQUE_RING_STD:
4122 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4123 dest_desc = &tp->rx_std[dest_idx];
4124 dest_map = &tp->rx_std_buffers[dest_idx];
4125 src_desc = &tp->rx_std[src_idx];
4126 src_map = &tp->rx_std_buffers[src_idx];
4127 break;
4128
4129 case RXD_OPAQUE_RING_JUMBO:
4130 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4131 dest_desc = &tp->rx_jumbo[dest_idx];
4132 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4133 src_desc = &tp->rx_jumbo[src_idx];
4134 src_map = &tp->rx_jumbo_buffers[src_idx];
4135 break;
4136
4137 default:
4138 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004139 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140
4141 dest_map->skb = src_map->skb;
4142 pci_unmap_addr_set(dest_map, mapping,
4143 pci_unmap_addr(src_map, mapping));
4144 dest_desc->addr_hi = src_desc->addr_hi;
4145 dest_desc->addr_lo = src_desc->addr_lo;
4146
4147 src_map->skb = NULL;
4148}
4149
4150#if TG3_VLAN_TAG_USED
4151static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4152{
4153 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4154}
4155#endif
4156
4157/* The RX ring scheme is composed of multiple rings which post fresh
4158 * buffers to the chip, and one special ring the chip uses to report
4159 * status back to the host.
4160 *
4161 * The special ring reports the status of received packets to the
4162 * host. The chip does not write into the original descriptor the
4163 * RX buffer was obtained from. The chip simply takes the original
4164 * descriptor as provided by the host, updates the status and length
4165 * field, then writes this into the next status ring entry.
4166 *
4167 * Each ring the host uses to post buffers to the chip is described
 4168 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4169 * it is first placed into the on-chip ram. When the packet's length
4170 * is known, it walks down the TG3_BDINFO entries to select the ring.
4171 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4172 * which is within the range of the new packet's length is chosen.
4173 *
4174 * The "separate ring for rx status" scheme may sound queer, but it makes
4175 * sense from a cache coherency perspective. If only the host writes
4176 * to the buffer post rings, and only the chip writes to the rx status
4177 * rings, then cache lines never move beyond shared-modified state.
4178 * If both the host and chip were to write into the same ring, cache line
4179 * eviction could occur since both entities want it in an exclusive state.
4180 */
4181static int tg3_rx(struct tg3 *tp, int budget)
4182{
Michael Chanf92905d2006-06-29 20:14:29 -07004183 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004184 u32 sw_idx = tp->rx_rcb_ptr;
4185 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 int received;
4187
4188 hw_idx = tp->hw_status->idx[0].rx_producer;
4189 /*
4190 * We need to order the read of hw_idx and the read of
4191 * the opaque cookie.
4192 */
4193 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 work_mask = 0;
4195 received = 0;
4196 while (sw_idx != hw_idx && budget > 0) {
4197 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4198 unsigned int len;
4199 struct sk_buff *skb;
4200 dma_addr_t dma_addr;
4201 u32 opaque_key, desc_idx, *post_ptr;
4202
4203 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4204 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4205 if (opaque_key == RXD_OPAQUE_RING_STD) {
4206 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4207 mapping);
4208 skb = tp->rx_std_buffers[desc_idx].skb;
4209 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004210 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4212 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4213 mapping);
4214 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4215 post_ptr = &tp->rx_jumbo_ptr;
4216 }
4217 else {
4218 goto next_pkt_nopost;
4219 }
4220
4221 work_mask |= opaque_key;
4222
4223 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4224 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4225 drop_it:
4226 tg3_recycle_rx(tp, opaque_key,
4227 desc_idx, *post_ptr);
4228 drop_it_no_recycle:
4229 /* Other statistics kept track of by card. */
4230 tp->net_stats.rx_dropped++;
4231 goto next_pkt;
4232 }
4233
4234 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4235
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004236 if (len > RX_COPY_THRESHOLD
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 && tp->rx_offset == 2
4238 /* rx_offset != 2 iff this is a 5701 card running
4239 * in PCI-X mode [see tg3_get_invariants()] */
4240 ) {
4241 int skb_size;
4242
4243 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4244 desc_idx, *post_ptr);
4245 if (skb_size < 0)
4246 goto drop_it;
4247
4248 pci_unmap_single(tp->pdev, dma_addr,
4249 skb_size - tp->rx_offset,
4250 PCI_DMA_FROMDEVICE);
4251
4252 skb_put(skb, len);
4253 } else {
4254 struct sk_buff *copy_skb;
4255
4256 tg3_recycle_rx(tp, opaque_key,
4257 desc_idx, *post_ptr);
4258
David S. Millera20e9c62006-07-31 22:38:16 -07004259 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260 if (copy_skb == NULL)
4261 goto drop_it_no_recycle;
4262
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 skb_reserve(copy_skb, 2);
4264 skb_put(copy_skb, len);
4265 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004266 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4268
4269 /* We'll reuse the original ring buffer. */
4270 skb = copy_skb;
4271 }
4272
4273 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4274 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4275 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4276 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4277 skb->ip_summed = CHECKSUM_UNNECESSARY;
4278 else
4279 skb->ip_summed = CHECKSUM_NONE;
4280
4281 skb->protocol = eth_type_trans(skb, tp->dev);
4282#if TG3_VLAN_TAG_USED
4283 if (tp->vlgrp != NULL &&
4284 desc->type_flags & RXD_FLAG_VLAN) {
4285 tg3_vlan_rx(tp, skb,
4286 desc->err_vlan & RXD_VLAN_MASK);
4287 } else
4288#endif
4289 netif_receive_skb(skb);
4290
4291 tp->dev->last_rx = jiffies;
4292 received++;
4293 budget--;
4294
4295next_pkt:
4296 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004297
4298 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4299 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4300
4301 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4302 TG3_64BIT_REG_LOW, idx);
4303 work_mask &= ~RXD_OPAQUE_RING_STD;
4304 rx_std_posted = 0;
4305 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004307 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004308 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004309
4310 /* Refresh hw_idx to see if there is new work */
4311 if (sw_idx == hw_idx) {
4312 hw_idx = tp->hw_status->idx[0].rx_producer;
4313 rmb();
4314 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315 }
4316
4317 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004318 tp->rx_rcb_ptr = sw_idx;
4319 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320
4321 /* Refill RX ring(s). */
4322 if (work_mask & RXD_OPAQUE_RING_STD) {
4323 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4324 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4325 sw_idx);
4326 }
4327 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4328 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4329 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4330 sw_idx);
4331 }
4332 mmiowb();
4333
4334 return received;
4335}
4336
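/* One NAPI pass: service any link-change event reported in the status
 * block, reap TX completions, and receive packets up to the remaining
 * budget.  Returns the updated work_done count.
 */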
David S. Miller6f535762007-10-11 18:08:29 -07004337static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341 /* handle link change and other phy events */
4342 if (!(tp->tg3_flags &
4343 (TG3_FLAG_USE_LINKCHG_REG |
4344 TG3_FLAG_POLL_SERDES))) {
4345 if (sblk->status & SD_STATUS_LINK_CHG) {
4346 sblk->status = SD_STATUS_UPDATED |
4347 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004348 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004349 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4350 tw32_f(MAC_STATUS,
4351 (MAC_STATUS_SYNC_CHANGED |
4352 MAC_STATUS_CFG_CHANGED |
4353 MAC_STATUS_MI_COMPLETION |
4354 MAC_STATUS_LNKSTATE_CHANGED));
4355 udelay(40);
4356 } else
4357 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004358 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359 }
4360 }
4361
4362 /* run TX completion thread */
4363 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004365 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004366 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 }
4368
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 /* run RX thread, within the bounds set by NAPI.
4370 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004371 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004373 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004374 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375
David S. Miller6f535762007-10-11 18:08:29 -07004376 return work_done;
4377}
David S. Millerf7383c22005-05-18 22:50:53 -07004378
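/* NAPI poll callback.  Loops over tg3_poll_work() until the budget is
 * exhausted or no work remains, then completes NAPI and re-enables
 * interrupts via tg3_restart_ints().  A pending TX recovery instead
 * schedules the reset task.
 */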
David S. Miller6f535762007-10-11 18:08:29 -07004379static int tg3_poll(struct napi_struct *napi, int budget)
4380{
4381 struct tg3 *tp = container_of(napi, struct tg3, napi);
4382 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004383 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004384
4385 while (1) {
4386 work_done = tg3_poll_work(tp, work_done, budget);
4387
4388 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4389 goto tx_recovery;
4390
4391 if (unlikely(work_done >= budget))
4392 break;
4393
Michael Chan4fd7ab52007-10-12 01:39:50 -07004394 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4395 /* tp->last_tag is used in tg3_restart_ints() below
4396 * to tell the hw how much work has been processed,
4397 * so we must read it before checking for more work.
4398 */
4399 tp->last_tag = sblk->status_tag;
4400 rmb();
4401 } else
4402 sblk->status &= ~SD_STATUS_UPDATED;
4403
David S. Miller6f535762007-10-11 18:08:29 -07004404 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004405 netif_rx_complete(tp->dev, napi);
4406 tg3_restart_ints(tp);
4407 break;
4408 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409 }
4410
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004411 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004412
4413tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004414 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004415 netif_rx_complete(tp->dev, napi);
4416 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004417 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418}
4419
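/* Mark the device as synchronizing and wait for any interrupt handler
 * still running on another CPU to finish.  The handlers check
 * tg3_irq_sync() and bail out early once irq_sync is set.
 */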
David S. Millerf47c11e2005-06-24 20:18:35 -07004420static void tg3_irq_quiesce(struct tg3 *tp)
4421{
4422 BUG_ON(tp->irq_sync);
4423
4424 tp->irq_sync = 1;
4425 smp_mb();
4426
4427 synchronize_irq(tp->pdev->irq);
4428}
4429
4430static inline int tg3_irq_sync(struct tg3 *tp)
4431{
4432 return tp->irq_sync;
4433}
4434
 4435/* Fully shut down all tg3 driver activity elsewhere in the system.
 4436 * If irq_sync is non-zero, the IRQ handler is synchronized with as
 4437 * well.  This is usually only necessary when shutting down the
 4438 * device.
4439 */
4440static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4441{
Michael Chan46966542007-07-11 19:47:19 -07004442 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004443 if (irq_sync)
4444 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004445}
4446
4447static inline void tg3_full_unlock(struct tg3 *tp)
4448{
David S. Millerf47c11e2005-06-24 20:18:35 -07004449 spin_unlock_bh(&tp->lock);
4450}
4451
Michael Chanfcfa0a32006-03-20 22:28:41 -08004452/* One-shot MSI handler - Chip automatically disables interrupt
4453 * after sending MSI so driver doesn't have to do it.
4454 */
David Howells7d12e782006-10-05 14:55:46 +01004455static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004456{
4457 struct net_device *dev = dev_id;
4458 struct tg3 *tp = netdev_priv(dev);
4459
4460 prefetch(tp->hw_status);
4461 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4462
4463 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004464 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004465
4466 return IRQ_HANDLED;
4467}
4468
Michael Chan88b06bc2005-04-21 17:13:25 -07004469/* MSI ISR - No need to check for interrupt sharing and no need to
4470 * flush status block and interrupt mailbox. PCI ordering rules
4471 * guarantee that MSI will arrive after the status block.
4472 */
David Howells7d12e782006-10-05 14:55:46 +01004473static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004474{
4475 struct net_device *dev = dev_id;
4476 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004477
Michael Chan61487482005-09-05 17:53:19 -07004478 prefetch(tp->hw_status);
4479 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004480 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004481 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004482 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004483	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004484 * NIC to stop sending us irqs, engaging "in-intr-handler"
4485 * event coalescing.
4486 */
4487 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004488 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004489 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004490
Michael Chan88b06bc2005-04-21 17:13:25 -07004491 return IRQ_RETVAL(1);
4492}
4493
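/* INTx interrupt handler for chips that do not use tagged status.
 * Confirms the interrupt is ours via the status block or the PCI
 * state register, masks further chip interrupts through the mailbox
 * and schedules NAPI if there is work to do.
 */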
David Howells7d12e782006-10-05 14:55:46 +01004494static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495{
4496 struct net_device *dev = dev_id;
4497 struct tg3 *tp = netdev_priv(dev);
4498 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 unsigned int handled = 1;
4500
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 /* In INTx mode, it is possible for the interrupt to arrive at
4502 * the CPU before the status block posted prior to the interrupt.
4503 * Reading the PCI State register will confirm whether the
4504 * interrupt is ours and will flush the status block.
4505 */
Michael Chand18edcb2007-03-24 20:57:11 -07004506 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4507 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4508 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4509 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004510 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004511 }
Michael Chand18edcb2007-03-24 20:57:11 -07004512 }
4513
4514 /*
4515 * Writing any value to intr-mbox-0 clears PCI INTA# and
4516 * chip-internal interrupt pending events.
 4517	 * Writing non-zero to intr-mbox-0 additionally tells the
4518 * NIC to stop sending us irqs, engaging "in-intr-handler"
4519 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004520 *
4521 * Flush the mailbox to de-assert the IRQ immediately to prevent
4522 * spurious interrupts. The flush impacts performance but
4523 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004524 */
Michael Chanc04cb342007-05-07 00:26:15 -07004525 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004526 if (tg3_irq_sync(tp))
4527 goto out;
4528 sblk->status &= ~SD_STATUS_UPDATED;
4529 if (likely(tg3_has_work(tp))) {
4530 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004531 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004532 } else {
4533 /* No work, shared interrupt perhaps? re-enable
4534 * interrupts, and flush that PCI write
4535 */
4536 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4537 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004538 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004539out:
David S. Millerfac9b832005-05-18 22:46:34 -07004540 return IRQ_RETVAL(handled);
4541}
4542
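/* INTx interrupt handler for chips using tagged status blocks.  Uses
 * status_tag to detect new work and records last_tag only when NAPI
 * is actually scheduled, to avoid racing with tg3_poll().
 */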
David Howells7d12e782006-10-05 14:55:46 +01004543static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004544{
4545 struct net_device *dev = dev_id;
4546 struct tg3 *tp = netdev_priv(dev);
4547 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004548 unsigned int handled = 1;
4549
David S. Millerfac9b832005-05-18 22:46:34 -07004550 /* In INTx mode, it is possible for the interrupt to arrive at
4551 * the CPU before the status block posted prior to the interrupt.
4552 * Reading the PCI State register will confirm whether the
4553 * interrupt is ours and will flush the status block.
4554 */
Michael Chand18edcb2007-03-24 20:57:11 -07004555 if (unlikely(sblk->status_tag == tp->last_tag)) {
4556 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4557 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4558 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004559 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 }
Michael Chand18edcb2007-03-24 20:57:11 -07004561 }
4562
4563 /*
4564 * writing any value to intr-mbox-0 clears PCI INTA# and
4565 * chip-internal interrupt pending events.
 4566	 * writing non-zero to intr-mbox-0 additionally tells the
4567 * NIC to stop sending us irqs, engaging "in-intr-handler"
4568 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004569 *
4570 * Flush the mailbox to de-assert the IRQ immediately to prevent
4571 * spurious interrupts. The flush impacts performance but
4572 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004573 */
Michael Chanc04cb342007-05-07 00:26:15 -07004574 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004575 if (tg3_irq_sync(tp))
4576 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004577 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004578 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4579 /* Update last_tag to mark that this status has been
4580 * seen. Because interrupt may be shared, we may be
4581 * racing with tg3_poll(), so only update last_tag
4582 * if tg3_poll() is not scheduled.
4583 */
4584 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004585 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004587out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588 return IRQ_RETVAL(handled);
4589}
4590
Michael Chan79381092005-04-21 17:13:59 -07004591/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004592static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004593{
4594 struct net_device *dev = dev_id;
4595 struct tg3 *tp = netdev_priv(dev);
4596 struct tg3_hw_status *sblk = tp->hw_status;
4597
Michael Chanf9804dd2005-09-27 12:13:10 -07004598 if ((sblk->status & SD_STATUS_UPDATED) ||
4599 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004600 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004601 return IRQ_RETVAL(1);
4602 }
4603 return IRQ_RETVAL(0);
4604}
4605
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004606static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004607static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608
Michael Chanb9ec6c12006-07-25 16:37:27 -07004609/* Restart hardware after configuration changes, self-test, etc.
4610 * Invoked with tp->lock held.
4611 */
4612static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004613 __releases(tp->lock)
4614 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004615{
4616 int err;
4617
4618 err = tg3_init_hw(tp, reset_phy);
4619 if (err) {
4620 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4621 "aborting.\n", tp->dev->name);
4622 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4623 tg3_full_unlock(tp);
4624 del_timer_sync(&tp->timer);
4625 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004626 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004627 dev_close(tp->dev);
4628 tg3_full_lock(tp, 0);
4629 }
4630 return err;
4631}
4632
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633#ifdef CONFIG_NET_POLL_CONTROLLER
4634static void tg3_poll_controller(struct net_device *dev)
4635{
Michael Chan88b06bc2005-04-21 17:13:25 -07004636 struct tg3 *tp = netdev_priv(dev);
4637
David Howells7d12e782006-10-05 14:55:46 +01004638 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639}
4640#endif
4641
David Howellsc4028952006-11-22 14:57:56 +00004642static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643{
David Howellsc4028952006-11-22 14:57:56 +00004644 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004645 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646 unsigned int restart_timer;
4647
Michael Chan7faa0062006-02-02 17:29:28 -08004648 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004649
4650 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004651 tg3_full_unlock(tp);
4652 return;
4653 }
4654
4655 tg3_full_unlock(tp);
4656
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004657 tg3_phy_stop(tp);
4658
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659 tg3_netif_stop(tp);
4660
David S. Millerf47c11e2005-06-24 20:18:35 -07004661 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662
4663 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4664 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4665
Michael Chandf3e6542006-05-26 17:48:07 -07004666 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4667 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4668 tp->write32_rx_mbox = tg3_write_flush_reg32;
4669 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4670 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4671 }
4672
Michael Chan944d9802005-05-29 14:57:48 -07004673 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004674 err = tg3_init_hw(tp, 1);
4675 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004676 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677
4678 tg3_netif_start(tp);
4679
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680 if (restart_timer)
4681 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004682
Michael Chanb9ec6c12006-07-25 16:37:27 -07004683out:
Michael Chan7faa0062006-02-02 17:29:28 -08004684 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004685
4686 if (!err)
4687 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688}
4689
Michael Chanb0408752007-02-13 12:18:30 -08004690static void tg3_dump_short_state(struct tg3 *tp)
4691{
4692 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4693 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4694 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4695 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4696}
4697
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698static void tg3_tx_timeout(struct net_device *dev)
4699{
4700 struct tg3 *tp = netdev_priv(dev);
4701
Michael Chanb0408752007-02-13 12:18:30 -08004702 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004703 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4704 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004705 tg3_dump_short_state(tp);
4706 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004707
4708 schedule_work(&tp->reset_task);
4709}
4710
Michael Chanc58ec932005-09-17 00:46:27 -07004711/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4712static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4713{
4714 u32 base = (u32) mapping & 0xffffffff;
4715
4716 return ((base > 0xffffdcc0) &&
4717 (base + len + 8 < base));
4718}
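/* Worked example of the check above (values chosen only for illustration):
 * a buffer mapped at 0xfffffff0 with len = 0x100 gives base = 0xfffffff0,
 * which is above 0xffffdcc0, and base + len + 8 wraps the 32-bit space to
 * 0x000000f8, which is below base.  Such a buffer straddles a 4GB boundary,
 * so the transmit path marks it with would_hit_hwbug and falls back to the
 * bounce copy in tigon3_dma_hwbug_workaround().
 */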
4719
Michael Chan72f2afb2006-03-06 19:28:35 -08004720/* Test for DMA addresses > 40-bit */
4721static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4722 int len)
4723{
4724#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004725 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004726 return (((u64) mapping + len) > DMA_40BIT_MASK);
4727 return 0;
4728#else
4729 return 0;
4730#endif
4731}
4732
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4734
Michael Chan72f2afb2006-03-06 19:28:35 -08004735/* Work around 4GB and 40-bit hardware DMA bugs. */
4736static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004737 u32 last_plus_one, u32 *start,
4738 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739{
Matt Carlson41588ba2008-04-19 18:12:33 -07004740 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004741 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004743 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744
Matt Carlson41588ba2008-04-19 18:12:33 -07004745 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4746 new_skb = skb_copy(skb, GFP_ATOMIC);
4747 else {
4748 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4749
4750 new_skb = skb_copy_expand(skb,
4751 skb_headroom(skb) + more_headroom,
4752 skb_tailroom(skb), GFP_ATOMIC);
4753 }
4754
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004756 ret = -1;
4757 } else {
4758 /* New SKB is guaranteed to be linear. */
4759 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004760 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4761 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4762
Michael Chanc58ec932005-09-17 00:46:27 -07004763 /* Make sure new skb does not cross any 4G boundaries.
4764 * Drop the packet if it does.
4765 */
David S. Miller90079ce2008-09-11 04:52:51 -07004766 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004767 if (!ret)
4768 skb_dma_unmap(&tp->pdev->dev, new_skb,
4769 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004770 ret = -1;
4771 dev_kfree_skb(new_skb);
4772 new_skb = NULL;
4773 } else {
4774 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4775 base_flags, 1 | (mss << 1));
4776 *start = NEXT_TX(entry);
4777 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778 }
4779
Linus Torvalds1da177e2005-04-16 15:20:36 -07004780 /* Now clean up the sw ring entries. */
4781 i = 0;
4782 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783 if (i == 0) {
4784 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785 } else {
4786 tp->tx_buffers[entry].skb = NULL;
4787 }
4788 entry = NEXT_TX(entry);
4789 i++;
4790 }
4791
David S. Miller90079ce2008-09-11 04:52:51 -07004792 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793 dev_kfree_skb(skb);
4794
Michael Chanc58ec932005-09-17 00:46:27 -07004795 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796}
4797
4798static void tg3_set_txd(struct tg3 *tp, int entry,
4799 dma_addr_t mapping, int len, u32 flags,
4800 u32 mss_and_is_end)
4801{
4802 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4803 int is_end = (mss_and_is_end & 0x1);
4804 u32 mss = (mss_and_is_end >> 1);
4805 u32 vlan_tag = 0;
4806
4807 if (is_end)
4808 flags |= TXD_FLAG_END;
4809 if (flags & TXD_FLAG_VLAN) {
4810 vlan_tag = flags >> 16;
4811 flags &= 0xffff;
4812 }
4813 vlan_tag |= (mss << TXD_MSS_SHIFT);
4814
4815 txd->addr_hi = ((u64) mapping >> 32);
4816 txd->addr_lo = ((u64) mapping & 0xffffffff);
4817 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4818 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4819}
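/* Note on the mss_and_is_end argument above: callers pack the end-of-packet
 * marker into bit 0 and the MSS into the remaining bits, so the final
 * descriptor of a TSO packet with an MSS of 1448 would be queued with
 * 1 | (1448 << 1), while a non-final descriptor passes 0 | (1448 << 1).
 */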
4820
Michael Chan5a6f3072006-03-20 22:28:05 -08004821/* hard_start_xmit for devices that don't have any bugs and
4822 * support TG3_FLG2_HW_TSO_2 only.
4823 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4825{
4826 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004828 struct skb_shared_info *sp;
4829 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004830
4831 len = skb_headlen(skb);
4832
Michael Chan00b70502006-06-17 21:58:45 -07004833 /* We are running in a BH-disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004834 * and TX reclaim runs via tp->napi.poll inside a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004835 * interrupt. Furthermore, IRQ processing runs lockless so we have
4836 * no IRQ context deadlocks to worry about either. Rejoice!
4837 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004838 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004839 if (!netif_queue_stopped(dev)) {
4840 netif_stop_queue(dev);
4841
4842 /* This is a hard error, log it. */
4843 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4844 "queue awake!\n", dev->name);
4845 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004846 return NETDEV_TX_BUSY;
4847 }
4848
4849 entry = tp->tx_prod;
4850 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004851 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004852 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004853 int tcp_opt_len, ip_tcp_len;
4854
4855 if (skb_header_cloned(skb) &&
4856 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4857 dev_kfree_skb(skb);
4858 goto out_unlock;
4859 }
4860
Michael Chanb0026622006-07-03 19:42:14 -07004861 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4862 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4863 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004864 struct iphdr *iph = ip_hdr(skb);
4865
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004866 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004867 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004868
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004869 iph->check = 0;
4870 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004871 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4872 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004873
4874 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4875 TXD_FLAG_CPU_POST_DMA);
4876
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004877 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004878
Michael Chan5a6f3072006-03-20 22:28:05 -08004879 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004880 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004881 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004882#if TG3_VLAN_TAG_USED
4883 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4884 base_flags |= (TXD_FLAG_VLAN |
4885 (vlan_tx_tag_get(skb) << 16));
4886#endif
4887
David S. Miller90079ce2008-09-11 04:52:51 -07004888 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4889 dev_kfree_skb(skb);
4890 goto out_unlock;
4891 }
4892
4893 sp = skb_shinfo(skb);
4894
4895 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004896
4897 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004898
4899 tg3_set_txd(tp, entry, mapping, len, base_flags,
4900 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4901
4902 entry = NEXT_TX(entry);
4903
4904 /* Now loop through additional data fragments, and queue them. */
4905 if (skb_shinfo(skb)->nr_frags > 0) {
4906 unsigned int i, last;
4907
4908 last = skb_shinfo(skb)->nr_frags - 1;
4909 for (i = 0; i <= last; i++) {
4910 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4911
4912 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004913 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004914 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004915
4916 tg3_set_txd(tp, entry, mapping, len,
4917 base_flags, (i == last) | (mss << 1));
4918
4919 entry = NEXT_TX(entry);
4920 }
4921 }
4922
4923 /* Packets are ready, update Tx producer idx local and on card. */
4924 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4925
4926 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004927 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004928 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004929 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004930 netif_wake_queue(tp->dev);
4931 }
4932
4933out_unlock:
4934 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004935
4936 dev->trans_start = jiffies;
4937
4938 return NETDEV_TX_OK;
4939}
4940
Michael Chan52c0fd82006-06-29 20:15:54 -07004941static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4942
4943/* Use GSO to work around a rare TSO bug that may be triggered when the
4944 * TSO header is greater than 80 bytes.
4945 */
4946static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4947{
4948 struct sk_buff *segs, *nskb;
4949
4950 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004951 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004952 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004953 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4954 return NETDEV_TX_BUSY;
4955
4956 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004957 }
4958
4959 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004960 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004961 goto tg3_tso_bug_end;
4962
4963 do {
4964 nskb = segs;
4965 segs = segs->next;
4966 nskb->next = NULL;
4967 tg3_start_xmit_dma_bug(nskb, tp->dev);
4968 } while (segs);
4969
4970tg3_tso_bug_end:
4971 dev_kfree_skb(skb);
4972
4973 return NETDEV_TX_OK;
4974}
Michael Chan52c0fd82006-06-29 20:15:54 -07004975
Michael Chan5a6f3072006-03-20 22:28:05 -08004976/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4977 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4978 */
4979static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4980{
4981 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004982 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004983 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004985 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986
4987 len = skb_headlen(skb);
4988
Michael Chan00b70502006-06-17 21:58:45 -07004989 /* We are running in a BH-disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004990 * and TX reclaim runs via tp->napi.poll inside a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004991 * interrupt. Furthermore, IRQ processing runs lockless so we have
4992 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004994 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004995 if (!netif_queue_stopped(dev)) {
4996 netif_stop_queue(dev);
4997
4998 /* This is a hard error, log it. */
4999 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5000 "queue awake!\n", dev->name);
5001 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002 return NETDEV_TX_BUSY;
5003 }
5004
5005 entry = tp->tx_prod;
5006 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005007 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07005010 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005011 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07005012 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013
5014 if (skb_header_cloned(skb) &&
5015 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5016 dev_kfree_skb(skb);
5017 goto out_unlock;
5018 }
5019
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005020 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03005021 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022
Michael Chan52c0fd82006-06-29 20:15:54 -07005023 hdr_len = ip_tcp_len + tcp_opt_len;
5024 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08005025 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07005026 return (tg3_tso_bug(tp, skb));
5027
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5029 TXD_FLAG_CPU_POST_DMA);
5030
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005031 iph = ip_hdr(skb);
5032 iph->check = 0;
5033 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005034 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005035 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005037 } else
5038 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5039 iph->daddr, 0,
5040 IPPROTO_TCP,
5041 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042
5043 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5044 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005045 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 int tsflags;
5047
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005048 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049 mss |= (tsflags << 11);
5050 }
5051 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005052 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053 int tsflags;
5054
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005055 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056 base_flags |= tsflags << 12;
5057 }
5058 }
5059 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060#if TG3_VLAN_TAG_USED
5061 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5062 base_flags |= (TXD_FLAG_VLAN |
5063 (vlan_tx_tag_get(skb) << 16));
5064#endif
5065
David S. Miller90079ce2008-09-11 04:52:51 -07005066 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5067 dev_kfree_skb(skb);
5068 goto out_unlock;
5069 }
5070
5071 sp = skb_shinfo(skb);
5072
5073 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074
5075 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076
5077 would_hit_hwbug = 0;
5078
Matt Carlson41588ba2008-04-19 18:12:33 -07005079 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5080 would_hit_hwbug = 1;
5081 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005082 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083
5084 tg3_set_txd(tp, entry, mapping, len, base_flags,
5085 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5086
5087 entry = NEXT_TX(entry);
5088
5089 /* Now loop through additional data fragments, and queue them. */
5090 if (skb_shinfo(skb)->nr_frags > 0) {
5091 unsigned int i, last;
5092
5093 last = skb_shinfo(skb)->nr_frags - 1;
5094 for (i = 0; i <= last; i++) {
5095 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5096
5097 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005098 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099
5100 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101
Michael Chanc58ec932005-09-17 00:46:27 -07005102 if (tg3_4g_overflow_test(mapping, len))
5103 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104
Michael Chan72f2afb2006-03-06 19:28:35 -08005105 if (tg3_40bit_overflow_test(tp, mapping, len))
5106 would_hit_hwbug = 1;
5107
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5109 tg3_set_txd(tp, entry, mapping, len,
5110 base_flags, (i == last)|(mss << 1));
5111 else
5112 tg3_set_txd(tp, entry, mapping, len,
5113 base_flags, (i == last));
5114
5115 entry = NEXT_TX(entry);
5116 }
5117 }
5118
5119 if (would_hit_hwbug) {
5120 u32 last_plus_one = entry;
5121 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122
Michael Chanc58ec932005-09-17 00:46:27 -07005123 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5124 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125
5126 /* If the workaround fails due to memory/mapping
5127 * failure, silently drop this packet.
5128 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005129 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005130 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131 goto out_unlock;
5132
5133 entry = start;
5134 }
5135
5136 /* Packets are ready, update Tx producer idx local and on card. */
5137 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5138
5139 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005140 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005142 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005143 netif_wake_queue(tp->dev);
5144 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145
5146out_unlock:
5147 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148
5149 dev->trans_start = jiffies;
5150
5151 return NETDEV_TX_OK;
5152}
5153
5154static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5155 int new_mtu)
5156{
5157 dev->mtu = new_mtu;
5158
Michael Chanef7f5ec2005-07-25 12:32:25 -07005159 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005160 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005161 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5162 ethtool_op_set_tso(dev, 0);
5163 }
5164 else
5165 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5166 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005167 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005168 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005169 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171}
5172
5173static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5174{
5175 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005176 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177
5178 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5179 return -EINVAL;
5180
5181 if (!netif_running(dev)) {
5182 /* We'll just catch it later when the
5183 * device is up'd.
5184 */
5185 tg3_set_mtu(dev, tp, new_mtu);
5186 return 0;
5187 }
5188
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005189 tg3_phy_stop(tp);
5190
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005192
5193 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194
Michael Chan944d9802005-05-29 14:57:48 -07005195 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196
5197 tg3_set_mtu(dev, tp, new_mtu);
5198
Michael Chanb9ec6c12006-07-25 16:37:27 -07005199 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200
Michael Chanb9ec6c12006-07-25 16:37:27 -07005201 if (!err)
5202 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203
David S. Millerf47c11e2005-06-24 20:18:35 -07005204 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005206 if (!err)
5207 tg3_phy_start(tp);
5208
Michael Chanb9ec6c12006-07-25 16:37:27 -07005209 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210}
5211
5212/* Free up pending packets in all rx/tx rings.
5213 *
5214 * The chip has been shut down and the driver detached from
5215 * the networking layer, so no interrupts or new tx packets will
5216 * end up in the driver. tp->{tx,}lock is not held and we are not
5217 * in an interrupt context and thus may sleep.
5218 */
5219static void tg3_free_rings(struct tg3 *tp)
5220{
5221 struct ring_info *rxp;
5222 int i;
5223
5224 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5225 rxp = &tp->rx_std_buffers[i];
5226
5227 if (rxp->skb == NULL)
5228 continue;
5229 pci_unmap_single(tp->pdev,
5230 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005231 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 PCI_DMA_FROMDEVICE);
5233 dev_kfree_skb_any(rxp->skb);
5234 rxp->skb = NULL;
5235 }
5236
5237 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5238 rxp = &tp->rx_jumbo_buffers[i];
5239
5240 if (rxp->skb == NULL)
5241 continue;
5242 pci_unmap_single(tp->pdev,
5243 pci_unmap_addr(rxp, mapping),
5244 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5245 PCI_DMA_FROMDEVICE);
5246 dev_kfree_skb_any(rxp->skb);
5247 rxp->skb = NULL;
5248 }
5249
5250 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5251 struct tx_ring_info *txp;
5252 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253
5254 txp = &tp->tx_buffers[i];
5255 skb = txp->skb;
5256
5257 if (skb == NULL) {
5258 i++;
5259 continue;
5260 }
5261
David S. Miller90079ce2008-09-11 04:52:51 -07005262 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5263
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264 txp->skb = NULL;
5265
David S. Miller90079ce2008-09-11 04:52:51 -07005266 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267
5268 dev_kfree_skb_any(skb);
5269 }
5270}
5271
5272/* Initialize tx/rx rings for packet processing.
5273 *
5274 * The chip has been shut down and the driver detached from
5275 * the networking layer, so no interrupts or new tx packets will
5276 * end up in the driver. tp->{tx,}lock are held and thus
5277 * we may not sleep.
5278 */
Michael Chan32d8c572006-07-25 16:38:29 -07005279static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280{
5281 u32 i;
5282
5283 /* Free up all the SKBs. */
5284 tg3_free_rings(tp);
5285
5286 /* Zero out all descriptors. */
5287 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5288 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5289 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5290 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5291
Michael Chan7e72aad2005-07-25 12:31:17 -07005292 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005293 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005294 (tp->dev->mtu > ETH_DATA_LEN))
5295 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5296
Linus Torvalds1da177e2005-04-16 15:20:36 -07005297 /* Initialize invariants of the rings; we only set this
5298 * stuff once. This works because the card does not
5299 * write into the rx buffer posting rings.
5300 */
5301 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5302 struct tg3_rx_buffer_desc *rxd;
5303
5304 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005305 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005306 << RXD_LEN_SHIFT;
5307 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5308 rxd->opaque = (RXD_OPAQUE_RING_STD |
5309 (i << RXD_OPAQUE_INDEX_SHIFT));
5310 }
5311
Michael Chan0f893dc2005-07-25 12:30:38 -07005312 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5314 struct tg3_rx_buffer_desc *rxd;
5315
5316 rxd = &tp->rx_jumbo[i];
5317 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5318 << RXD_LEN_SHIFT;
5319 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5320 RXD_FLAG_JUMBO;
5321 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5322 (i << RXD_OPAQUE_INDEX_SHIFT));
5323 }
5324 }
5325
5326 /* Now allocate fresh SKBs for each rx ring. */
5327 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005328 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5329 printk(KERN_WARNING PFX
5330 "%s: Using a smaller RX standard ring, "
5331 "only %d out of %d buffers were allocated "
5332 "successfully.\n",
5333 tp->dev->name, i, tp->rx_pending);
5334 if (i == 0)
5335 return -ENOMEM;
5336 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005337 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339 }
5340
Michael Chan0f893dc2005-07-25 12:30:38 -07005341 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5343 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005344 -1, i) < 0) {
5345 printk(KERN_WARNING PFX
5346 "%s: Using a smaller RX jumbo ring, "
5347 "only %d out of %d buffers were "
5348 "allocated successfully.\n",
5349 tp->dev->name, i, tp->rx_jumbo_pending);
5350 if (i == 0) {
5351 tg3_free_rings(tp);
5352 return -ENOMEM;
5353 }
5354 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005355 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005357 }
5358 }
Michael Chan32d8c572006-07-25 16:38:29 -07005359 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360}
5361
5362/*
5363 * Must not be invoked with interrupt sources disabled and
5364 * the hardware shut down.
5365 */
5366static void tg3_free_consistent(struct tg3 *tp)
5367{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005368 kfree(tp->rx_std_buffers);
5369 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370 if (tp->rx_std) {
5371 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5372 tp->rx_std, tp->rx_std_mapping);
5373 tp->rx_std = NULL;
5374 }
5375 if (tp->rx_jumbo) {
5376 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5377 tp->rx_jumbo, tp->rx_jumbo_mapping);
5378 tp->rx_jumbo = NULL;
5379 }
5380 if (tp->rx_rcb) {
5381 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5382 tp->rx_rcb, tp->rx_rcb_mapping);
5383 tp->rx_rcb = NULL;
5384 }
5385 if (tp->tx_ring) {
5386 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5387 tp->tx_ring, tp->tx_desc_mapping);
5388 tp->tx_ring = NULL;
5389 }
5390 if (tp->hw_status) {
5391 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5392 tp->hw_status, tp->status_mapping);
5393 tp->hw_status = NULL;
5394 }
5395 if (tp->hw_stats) {
5396 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5397 tp->hw_stats, tp->stats_mapping);
5398 tp->hw_stats = NULL;
5399 }
5400}
5401
5402/*
5403 * Must not be invoked with interrupt sources disabled and
5404 * the hardware shut down. Can sleep.
5405 */
5406static int tg3_alloc_consistent(struct tg3 *tp)
5407{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005408 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409 (TG3_RX_RING_SIZE +
5410 TG3_RX_JUMBO_RING_SIZE)) +
5411 (sizeof(struct tx_ring_info) *
5412 TG3_TX_RING_SIZE),
5413 GFP_KERNEL);
5414 if (!tp->rx_std_buffers)
5415 return -ENOMEM;
5416
Linus Torvalds1da177e2005-04-16 15:20:36 -07005417 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5418 tp->tx_buffers = (struct tx_ring_info *)
5419 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5420
5421 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5422 &tp->rx_std_mapping);
5423 if (!tp->rx_std)
5424 goto err_out;
5425
5426 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5427 &tp->rx_jumbo_mapping);
5428
5429 if (!tp->rx_jumbo)
5430 goto err_out;
5431
5432 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5433 &tp->rx_rcb_mapping);
5434 if (!tp->rx_rcb)
5435 goto err_out;
5436
5437 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5438 &tp->tx_desc_mapping);
5439 if (!tp->tx_ring)
5440 goto err_out;
5441
5442 tp->hw_status = pci_alloc_consistent(tp->pdev,
5443 TG3_HW_STATUS_SIZE,
5444 &tp->status_mapping);
5445 if (!tp->hw_status)
5446 goto err_out;
5447
5448 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5449 sizeof(struct tg3_hw_stats),
5450 &tp->stats_mapping);
5451 if (!tp->hw_stats)
5452 goto err_out;
5453
5454 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5455 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5456
5457 return 0;
5458
5459err_out:
5460 tg3_free_consistent(tp);
5461 return -ENOMEM;
5462}
5463
5464#define MAX_WAIT_CNT 1000
5465
5466/* To stop a block, clear the enable bit and poll till it
5467 * clears. tp->lock is held.
5468 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005469static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470{
5471 unsigned int i;
5472 u32 val;
5473
5474 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5475 switch (ofs) {
5476 case RCVLSC_MODE:
5477 case DMAC_MODE:
5478 case MBFREE_MODE:
5479 case BUFMGR_MODE:
5480 case MEMARB_MODE:
5481 /* We can't enable/disable these bits of the
5482 * 5705/5750, so just say success.
5483 */
5484 return 0;
5485
5486 default:
5487 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489 }
5490
5491 val = tr32(ofs);
5492 val &= ~enable_bit;
5493 tw32_f(ofs, val);
5494
5495 for (i = 0; i < MAX_WAIT_CNT; i++) {
5496 udelay(100);
5497 val = tr32(ofs);
5498 if ((val & enable_bit) == 0)
5499 break;
5500 }
5501
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005502 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005503 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5504 "ofs=%lx enable_bit=%x\n",
5505 ofs, enable_bit);
5506 return -ENODEV;
5507 }
5508
5509 return 0;
5510}
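/* Timing note: the poll above checks the enable bit every 100 microseconds
 * for up to MAX_WAIT_CNT (1000) iterations, i.e. roughly 100 milliseconds,
 * before the block is reported as failing to stop (unless silent is set).
 */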
5511
5512/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005513static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005514{
5515 int i, err;
5516
5517 tg3_disable_ints(tp);
5518
5519 tp->rx_mode &= ~RX_MODE_ENABLE;
5520 tw32_f(MAC_RX_MODE, tp->rx_mode);
5521 udelay(10);
5522
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005523 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5524 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5525 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5526 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5527 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5528 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005529
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005530 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5531 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5532 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5533 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5534 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5535 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5536 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005537
5538 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5539 tw32_f(MAC_MODE, tp->mac_mode);
5540 udelay(40);
5541
5542 tp->tx_mode &= ~TX_MODE_ENABLE;
5543 tw32_f(MAC_TX_MODE, tp->tx_mode);
5544
5545 for (i = 0; i < MAX_WAIT_CNT; i++) {
5546 udelay(100);
5547 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5548 break;
5549 }
5550 if (i >= MAX_WAIT_CNT) {
5551 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5552 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5553 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005554 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555 }
5556
Michael Chane6de8ad2005-05-05 14:42:41 -07005557 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005558 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5559 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005560
5561 tw32(FTQ_RESET, 0xffffffff);
5562 tw32(FTQ_RESET, 0x00000000);
5563
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005564 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5565 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566
5567 if (tp->hw_status)
5568 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5569 if (tp->hw_stats)
5570 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5571
Linus Torvalds1da177e2005-04-16 15:20:36 -07005572 return err;
5573}
5574
5575/* tp->lock is held. */
5576static int tg3_nvram_lock(struct tg3 *tp)
5577{
5578 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5579 int i;
5580
Michael Chanec41c7d2006-01-17 02:40:55 -08005581 if (tp->nvram_lock_cnt == 0) {
5582 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5583 for (i = 0; i < 8000; i++) {
5584 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5585 break;
5586 udelay(20);
5587 }
5588 if (i == 8000) {
5589 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5590 return -ENODEV;
5591 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005592 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005593 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005594 }
5595 return 0;
5596}
5597
5598/* tp->lock is held. */
5599static void tg3_nvram_unlock(struct tg3 *tp)
5600{
Michael Chanec41c7d2006-01-17 02:40:55 -08005601 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5602 if (tp->nvram_lock_cnt > 0)
5603 tp->nvram_lock_cnt--;
5604 if (tp->nvram_lock_cnt == 0)
5605 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5606 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005607}
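/* The NVRAM lock above is reference counted: nested tg3_nvram_lock() calls
 * only touch the NVRAM_SWARB arbitration register on the first acquire, and
 * tg3_nvram_unlock() hands the arbiter back only once nvram_lock_cnt drops
 * to zero.
 */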
5608
5609/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005610static void tg3_enable_nvram_access(struct tg3 *tp)
5611{
5612 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5613 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5614 u32 nvaccess = tr32(NVRAM_ACCESS);
5615
5616 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5617 }
5618}
5619
5620/* tp->lock is held. */
5621static void tg3_disable_nvram_access(struct tg3 *tp)
5622{
5623 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5624 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5625 u32 nvaccess = tr32(NVRAM_ACCESS);
5626
5627 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5628 }
5629}
5630
Matt Carlson0d3031d2007-10-10 18:02:43 -07005631static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5632{
5633 int i;
5634 u32 apedata;
5635
5636 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5637 if (apedata != APE_SEG_SIG_MAGIC)
5638 return;
5639
5640 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005641 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005642 return;
5643
5644 /* Wait for up to 1 millisecond for APE to service previous event. */
5645 for (i = 0; i < 10; i++) {
5646 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5647 return;
5648
5649 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5650
5651 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5652 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5653 event | APE_EVENT_STATUS_EVENT_PENDING);
5654
5655 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5656
5657 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5658 break;
5659
5660 udelay(100);
5661 }
5662
5663 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5664 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5665}
5666
5667static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5668{
5669 u32 event;
5670 u32 apedata;
5671
5672 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5673 return;
5674
5675 switch (kind) {
5676 case RESET_KIND_INIT:
5677 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5678 APE_HOST_SEG_SIG_MAGIC);
5679 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5680 APE_HOST_SEG_LEN_MAGIC);
5681 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5682 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5683 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5684 APE_HOST_DRIVER_ID_MAGIC);
5685 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5686 APE_HOST_BEHAV_NO_PHYLOCK);
5687
5688 event = APE_EVENT_STATUS_STATE_START;
5689 break;
5690 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005691 /* With the interface we are currently using,
5692 * APE does not track driver state. Wiping
5693 * out the HOST SEGMENT SIGNATURE forces
5694 * the APE to assume OS absent status.
5695 */
5696 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5697
Matt Carlson0d3031d2007-10-10 18:02:43 -07005698 event = APE_EVENT_STATUS_STATE_UNLOAD;
5699 break;
5700 case RESET_KIND_SUSPEND:
5701 event = APE_EVENT_STATUS_STATE_SUSPEND;
5702 break;
5703 default:
5704 return;
5705 }
5706
5707 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5708
5709 tg3_ape_send_event(tp, event);
5710}
5711
Michael Chane6af3012005-04-21 17:12:05 -07005712/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005713static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5714{
David S. Millerf49639e2006-06-09 11:58:36 -07005715 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5716 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005717
5718 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5719 switch (kind) {
5720 case RESET_KIND_INIT:
5721 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5722 DRV_STATE_START);
5723 break;
5724
5725 case RESET_KIND_SHUTDOWN:
5726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5727 DRV_STATE_UNLOAD);
5728 break;
5729
5730 case RESET_KIND_SUSPEND:
5731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5732 DRV_STATE_SUSPEND);
5733 break;
5734
5735 default:
5736 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005737 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005738 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005739
5740 if (kind == RESET_KIND_INIT ||
5741 kind == RESET_KIND_SUSPEND)
5742 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005743}
5744
5745/* tp->lock is held. */
5746static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5747{
5748 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5749 switch (kind) {
5750 case RESET_KIND_INIT:
5751 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5752 DRV_STATE_START_DONE);
5753 break;
5754
5755 case RESET_KIND_SHUTDOWN:
5756 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5757 DRV_STATE_UNLOAD_DONE);
5758 break;
5759
5760 default:
5761 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005762 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005763 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005764
5765 if (kind == RESET_KIND_SHUTDOWN)
5766 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005767}
5768
5769/* tp->lock is held. */
5770static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5771{
5772 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5773 switch (kind) {
5774 case RESET_KIND_INIT:
5775 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5776 DRV_STATE_START);
5777 break;
5778
5779 case RESET_KIND_SHUTDOWN:
5780 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5781 DRV_STATE_UNLOAD);
5782 break;
5783
5784 case RESET_KIND_SUSPEND:
5785 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5786 DRV_STATE_SUSPEND);
5787 break;
5788
5789 default:
5790 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005791 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005792 }
5793}
5794
Michael Chan7a6f4362006-09-27 16:03:31 -07005795static int tg3_poll_fw(struct tg3 *tp)
5796{
5797 int i;
5798 u32 val;
5799
Michael Chanb5d37722006-09-27 16:06:21 -07005800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005801 /* Wait up to 20ms for init done. */
5802 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005803 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5804 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005805 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005806 }
5807 return -ENODEV;
5808 }
5809
Michael Chan7a6f4362006-09-27 16:03:31 -07005810 /* Wait for firmware initialization to complete. */
5811 for (i = 0; i < 100000; i++) {
5812 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5813 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5814 break;
5815 udelay(10);
5816 }
5817
5818 /* Chip might not be fitted with firmware. Some Sun onboard
5819 * parts are configured like that. So don't signal the timeout
5820 * of the above loop as an error, but do report the lack of
5821 * running firmware once.
5822 */
5823 if (i >= 100000 &&
5824 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5825 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5826
5827 printk(KERN_INFO PFX "%s: No firmware running.\n",
5828 tp->dev->name);
5829 }
5830
5831 return 0;
5832}
5833
Michael Chanee6a99b2007-07-18 21:49:10 -07005834/* Save PCI command register before chip reset */
5835static void tg3_save_pci_state(struct tg3 *tp)
5836{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005837 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005838}
5839
5840/* Restore PCI state after chip reset */
5841static void tg3_restore_pci_state(struct tg3 *tp)
5842{
5843 u32 val;
5844
5845 /* Re-enable indirect register accesses. */
5846 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5847 tp->misc_host_ctrl);
5848
5849 /* Set MAX PCI retry to zero. */
5850 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5851 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5852 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5853 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005854 /* Allow reads and writes to the APE register and memory space. */
5855 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5856 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5857 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005858 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5859
Matt Carlson8a6eac92007-10-21 16:17:55 -07005860 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005861
Matt Carlsonfcb389d2008-11-03 16:55:44 -08005862 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5863 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5864 pcie_set_readrq(tp->pdev, 4096);
5865 else {
5866 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5867 tp->pci_cacheline_sz);
5868 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5869 tp->pci_lat_timer);
5870 }
Michael Chan114342f2007-10-15 02:12:26 -07005871 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005872
Michael Chanee6a99b2007-07-18 21:49:10 -07005873 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005874 if (tp->pcix_cap) {
5875 u16 pcix_cmd;
5876
5877 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5878 &pcix_cmd);
5879 pcix_cmd &= ~PCI_X_CMD_ERO;
5880 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5881 pcix_cmd);
5882 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005883
5884 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005885
5886 /* Chip reset on 5780 will reset MSI enable bit,
5887 * so we need to restore it.
5888 */
5889 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5890 u16 ctrl;
5891
5892 pci_read_config_word(tp->pdev,
5893 tp->msi_cap + PCI_MSI_FLAGS,
5894 &ctrl);
5895 pci_write_config_word(tp->pdev,
5896 tp->msi_cap + PCI_MSI_FLAGS,
5897 ctrl | PCI_MSI_FLAGS_ENABLE);
5898 val = tr32(MSGINT_MODE);
5899 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5900 }
5901 }
5902}
5903
Linus Torvalds1da177e2005-04-16 15:20:36 -07005904static void tg3_stop_fw(struct tg3 *);
5905
5906/* tp->lock is held. */
5907static int tg3_chip_reset(struct tg3 *tp)
5908{
5909 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005910 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005911 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005912
David S. Millerf49639e2006-06-09 11:58:36 -07005913 tg3_nvram_lock(tp);
5914
Matt Carlson158d7ab2008-05-29 01:37:54 -07005915 tg3_mdio_stop(tp);
5916
Matt Carlson77b483f2008-08-15 14:07:24 -07005917 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5918
David S. Millerf49639e2006-06-09 11:58:36 -07005919 /* No matching tg3_nvram_unlock() after this because
5920 * chip reset below will undo the nvram lock.
5921 */
5922 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005923
Michael Chanee6a99b2007-07-18 21:49:10 -07005924 /* GRC_MISC_CFG core clock reset will clear the memory
5925 * enable bit in PCI register 4 and the MSI enable bit
5926 * on some chips, so we save relevant registers here.
5927 */
5928 tg3_save_pci_state(tp);
5929
Michael Chand9ab5ad2006-03-20 22:27:35 -08005930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005936 tw32(GRC_FASTBOOT_PC, 0);
5937
Linus Torvalds1da177e2005-04-16 15:20:36 -07005938 /*
5939 * We must avoid the readl() that normally takes place.
5940 * It locks machines, causes machine checks, and other
5941 * fun things. So, temporarily disable the 5701
5942 * hardware workaround, while we do the reset.
5943 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005944 write_op = tp->write32;
5945 if (write_op == tg3_write_flush_reg32)
5946 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005947
Michael Chand18edcb2007-03-24 20:57:11 -07005948 /* Prevent the irq handler from reading or writing PCI registers
5949 * during chip reset when the memory enable bit in the PCI command
5950 * register may be cleared. The chip does not generate interrupts
5951 * at this time, but the irq handler may still be called due to irq
5952 * sharing or irqpoll.
5953 */
5954 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005955 if (tp->hw_status) {
5956 tp->hw_status->status = 0;
5957 tp->hw_status->status_tag = 0;
5958 }
Michael Chand18edcb2007-03-24 20:57:11 -07005959 tp->last_tag = 0;
5960 smp_mb();
5961 synchronize_irq(tp->pdev->irq);
5962
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963 /* do the reset */
5964 val = GRC_MISC_CFG_CORECLK_RESET;
5965
5966 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5967 if (tr32(0x7e2c) == 0x60) {
5968 tw32(0x7e2c, 0x20);
5969 }
5970 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5971 tw32(GRC_MISC_CFG, (1 << 29));
5972 val |= (1 << 29);
5973 }
5974 }
5975
Michael Chanb5d37722006-09-27 16:06:21 -07005976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5977 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5978 tw32(GRC_VCPU_EXT_CTRL,
5979 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5980 }
5981
Linus Torvalds1da177e2005-04-16 15:20:36 -07005982 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5983 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5984 tw32(GRC_MISC_CFG, val);
5985
Michael Chan1ee582d2005-08-09 20:16:46 -07005986 /* restore 5701 hardware bug workaround write method */
5987 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988
5989 /* Unfortunately, we have to delay before the PCI read back.
5990 * Some 575X chips will not even respond to a PCI cfg access
5991 * when the reset command is given to the chip.
5992 *
5993 * How do these hardware designers expect things to work
5994 * properly if the PCI write is posted for a long period
5995 * of time? It is always necessary to have some method by
5996 * which a register read back can occur to push the write
5997 * out which does the reset.
5998 *
5999 * For most tg3 variants the trick below was working.
6000 * Ho hum...
6001 */
6002 udelay(120);
6003
6004 /* Flush PCI posted writes. The normal MMIO registers
6005 * are inaccessible at this time so this is the only
6006 * way to do this reliably (actually, this is no longer
6007 * the case, see above). I tried to use indirect
6008 * register read/write but this upset some 5701 variants.
6009 */
6010 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6011
6012 udelay(120);
6013
6014 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6015 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6016 int i;
6017 u32 cfg_val;
6018
6019 /* Wait for link training to complete. */
6020 for (i = 0; i < 5000; i++)
6021 udelay(100);
6022
6023 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6024 pci_write_config_dword(tp->pdev, 0xc4,
6025 cfg_val | (1 << 15));
6026 }
Matt Carlsonfcb389d2008-11-03 16:55:44 -08006027 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
6028 /* Set PCIE max payload size and clear error status. */
6029 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006030 }
6031
Michael Chanee6a99b2007-07-18 21:49:10 -07006032 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006033
Michael Chand18edcb2007-03-24 20:57:11 -07006034 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6035
Michael Chanee6a99b2007-07-18 21:49:10 -07006036 val = 0;
6037 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07006038 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07006039 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006040
6041 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6042 tg3_stop_fw(tp);
6043 tw32(0x5000, 0x400);
6044 }
6045
6046 tw32(GRC_MODE, tp->grc_mode);
6047
6048 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006049 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006050
6051 tw32(0xc4, val | (1 << 15));
6052 }
6053
6054 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6056 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6057 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6058 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6059 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6060 }
6061
6062 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6063 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6064 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07006065 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6066 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6067 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07006068 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6069 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6070 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6071 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6072 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006073 } else
6074 tw32_f(MAC_MODE, 0);
6075 udelay(40);
6076
Matt Carlson158d7ab2008-05-29 01:37:54 -07006077 tg3_mdio_start(tp);
6078
Matt Carlson77b483f2008-08-15 14:07:24 -07006079 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6080
Michael Chan7a6f4362006-09-27 16:03:31 -07006081 err = tg3_poll_fw(tp);
6082 if (err)
6083 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006084
6085 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6086 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006087 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006088
6089 tw32(0x7c00, val | (1 << 25));
6090 }
6091
6092 /* Reprobe ASF enable state. */
6093 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6094 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6095 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6096 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6097 u32 nic_cfg;
6098
6099 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6100 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6101 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006102 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006103 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006104 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6105 }
6106 }
6107
6108 return 0;
6109}
6110
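/* Quiesce the ASF management firmware before a chip reset.  The
 * handshake below: wait for the RX CPU to ACK any event already in
 * flight, post FWCMD_NICDRV_PAUSE_FW in the firmware command mailbox,
 * raise a firmware event, then wait for that event to be ACKed as well.
 * Only done when ASF is enabled and the APE is not managing the
 * firmware.
 */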
6111/* tp->lock is held. */
6112static void tg3_stop_fw(struct tg3 *tp)
6113{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006114 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6115 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006116 /* Wait for RX cpu to ACK the previous event. */
6117 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006118
6119 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006120
6121 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006122
Matt Carlson7c5026a2008-05-02 16:49:29 -07006123 /* Wait for RX cpu to ACK this event. */
6124 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006125 }
6126}
6127
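/* Full controller halt: pause the firmware, write the pre-reset
 * signature, quiesce the MAC and DMA engines, reset the chip, then
 * write the legacy and post-reset signatures.
 */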
6128/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006129static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006130{
6131 int err;
6132
6133 tg3_stop_fw(tp);
6134
Michael Chan944d9802005-05-29 14:57:48 -07006135 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006136
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006137 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006138 err = tg3_chip_reset(tp);
6139
Michael Chan944d9802005-05-29 14:57:48 -07006140 tg3_write_sig_legacy(tp, kind);
6141 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006142
6143 if (err)
6144 return err;
6145
6146 return 0;
6147}
6148
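/* Layout of the firmware image loaded by tg3_load_5701_a0_firmware_fix()
 * below: base address and length of each section (text, rodata, data,
 * sbss, bss) as linked for the on-chip RX CPU.  The 0x08000000 load
 * address suggests a MIPS core, though that is an inference, not
 * something this file states.
 */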
6149#define TG3_FW_RELEASE_MAJOR 0x0
6150#define TG3_FW_RELASE_MINOR 0x0
6151#define TG3_FW_RELEASE_FIX 0x0
6152#define TG3_FW_START_ADDR 0x08000000
6153#define TG3_FW_TEXT_ADDR 0x08000000
6154#define TG3_FW_TEXT_LEN 0x9c0
6155#define TG3_FW_RODATA_ADDR 0x080009c0
6156#define TG3_FW_RODATA_LEN 0x60
6157#define TG3_FW_DATA_ADDR 0x08000a40
6158#define TG3_FW_DATA_LEN 0x20
6159#define TG3_FW_SBSS_ADDR 0x08000a60
6160#define TG3_FW_SBSS_LEN 0xc
6161#define TG3_FW_BSS_ADDR 0x08000a70
6162#define TG3_FW_BSS_LEN 0x10
6163
Andreas Mohr50da8592006-08-14 23:54:30 -07006164static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6166 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6167 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6168 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6169 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6170 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6171 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6172 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6173 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6174 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6175 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6176 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6177 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6178 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6179 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6180 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6181 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6182 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6183 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6184 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6185 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6186 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6187 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6188 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6190 0, 0, 0, 0, 0, 0,
6191 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6192 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6193 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6194 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6195 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6196 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6197 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6198 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6199 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6200 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6201 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6202 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6203 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6204 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6205 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6206 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6207 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6208 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6209 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6210 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6211 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6212 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6213 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6214 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6215 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6216 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6217 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6218 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6219 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6220 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6221 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6222 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6223 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6224 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6225 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6226 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6227 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6228 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6229 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6230 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6231 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6232 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6233 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6234 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6235 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6236 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6237 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6238 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6239 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6240 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6241 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6242 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6243 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6244 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6245 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6246 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6247 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6248 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6249 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6250 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6251 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6252 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6253 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6254 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6255 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6256};
6257
Andreas Mohr50da8592006-08-14 23:54:30 -07006258static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006259 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6260 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6261 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6262 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6263 0x00000000
6264};
6265
6266#if 0 /* All zeros, don't eat up space with it. */
6267u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6268 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6269 0x00000000, 0x00000000, 0x00000000, 0x00000000
6270};
6271#endif
6272
6273#define RX_CPU_SCRATCH_BASE 0x30000
6274#define RX_CPU_SCRATCH_SIZE 0x04000
6275#define TX_CPU_SCRATCH_BASE 0x34000
6276#define TX_CPU_SCRATCH_SIZE 0x04000
6277
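/* Halt one of the on-chip CPUs.  The 5906 exposes a single VCPU that is
 * halted through GRC_VCPU_EXT_CTRL; all other chips are halted by
 * repeatedly writing CPU_MODE_HALT until the mode register reports the
 * halt bit as set.
 */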
6278/* tp->lock is held. */
6279static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6280{
6281 int i;
6282
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006283 BUG_ON(offset == TX_CPU_BASE &&
6284 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006285
Michael Chanb5d37722006-09-27 16:06:21 -07006286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6287 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6288
6289 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6290 return 0;
6291 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292 if (offset == RX_CPU_BASE) {
6293 for (i = 0; i < 10000; i++) {
6294 tw32(offset + CPU_STATE, 0xffffffff);
6295 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6296 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6297 break;
6298 }
6299
6300 tw32(offset + CPU_STATE, 0xffffffff);
6301 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6302 udelay(10);
6303 } else {
6304 for (i = 0; i < 10000; i++) {
6305 tw32(offset + CPU_STATE, 0xffffffff);
6306 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6307 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6308 break;
6309 }
6310 }
6311
6312 if (i >= 10000) {
6313		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
6314		       "%s CPU\n",
6315 tp->dev->name,
6316 (offset == RX_CPU_BASE ? "RX" : "TX"));
6317 return -ENODEV;
6318 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006319
6320 /* Clear firmware's nvram arbitration. */
6321 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6322 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006323 return 0;
6324}
6325
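/* Describes one firmware image: base address, length and host pointer
 * for each loadable section.  A NULL section pointer means the section
 * is written as all zeroes (see the data section of the 5701 A0 fix).
 */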
6326struct fw_info {
6327 unsigned int text_base;
6328 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006329 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006330 unsigned int rodata_base;
6331 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006332 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006333 unsigned int data_base;
6334 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006335 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006336};
6337
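/* Copy a firmware image into a CPU's scratch memory.  The NVRAM lock is
 * taken first since bootcode may still be running, the target CPU is
 * halted, the scratch area is cleared, and the text, rodata and data
 * sections are then written one 32-bit word at a time (tg3_write_mem on
 * 5705 and later chips, tg3_write_indirect_reg32 otherwise).
 */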
6338/* tp->lock is held. */
6339static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6340 int cpu_scratch_size, struct fw_info *info)
6341{
Michael Chanec41c7d2006-01-17 02:40:55 -08006342 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006343 void (*write_op)(struct tg3 *, u32, u32);
6344
6345 if (cpu_base == TX_CPU_BASE &&
6346 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6347 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6348 "TX cpu firmware on %s which is 5705.\n",
6349 tp->dev->name);
6350 return -EINVAL;
6351 }
6352
6353 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6354 write_op = tg3_write_mem;
6355 else
6356 write_op = tg3_write_indirect_reg32;
6357
Michael Chan1b628152005-05-29 14:59:49 -07006358 /* It is possible that bootcode is still loading at this point.
6359	 * Get the nvram lock before halting the cpu.
6360 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006361 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006362 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006363 if (!lock_err)
6364 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006365 if (err)
6366 goto out;
6367
6368 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6369 write_op(tp, cpu_scratch_base + i, 0);
6370 tw32(cpu_base + CPU_STATE, 0xffffffff);
6371 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6372 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6373 write_op(tp, (cpu_scratch_base +
6374 (info->text_base & 0xffff) +
6375 (i * sizeof(u32))),
6376 (info->text_data ?
6377 info->text_data[i] : 0));
6378 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6379 write_op(tp, (cpu_scratch_base +
6380 (info->rodata_base & 0xffff) +
6381 (i * sizeof(u32))),
6382 (info->rodata_data ?
6383 info->rodata_data[i] : 0));
6384 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6385 write_op(tp, (cpu_scratch_base +
6386 (info->data_base & 0xffff) +
6387 (i * sizeof(u32))),
6388 (info->data_data ?
6389 info->data_data[i] : 0));
6390
6391 err = 0;
6392
6393out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006394 return err;
6395}
6396
6397/* tp->lock is held. */
6398static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6399{
6400 struct fw_info info;
6401 int err, i;
6402
6403 info.text_base = TG3_FW_TEXT_ADDR;
6404 info.text_len = TG3_FW_TEXT_LEN;
6405 info.text_data = &tg3FwText[0];
6406 info.rodata_base = TG3_FW_RODATA_ADDR;
6407 info.rodata_len = TG3_FW_RODATA_LEN;
6408 info.rodata_data = &tg3FwRodata[0];
6409 info.data_base = TG3_FW_DATA_ADDR;
6410 info.data_len = TG3_FW_DATA_LEN;
6411 info.data_data = NULL;
6412
6413 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6414 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6415 &info);
6416 if (err)
6417 return err;
6418
6419 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6420 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6421 &info);
6422 if (err)
6423 return err;
6424
6425	/* Now start up only the RX cpu. */
6426 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6427 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6428
6429 for (i = 0; i < 5; i++) {
6430 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6431 break;
6432 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6433 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6434 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6435 udelay(1000);
6436 }
6437 if (i >= 5) {
6438		printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
6439		       "to set RX CPU PC, is %08x, should be %08x\n",
6440 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6441 TG3_FW_TEXT_ADDR);
6442 return -ENODEV;
6443 }
6444 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6445 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6446
6447 return 0;
6448}
6449
Linus Torvalds1da177e2005-04-16 15:20:36 -07006450
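/* TSO firmware image for chips without hardware TSO support.
 * tg3_load_tso_firmware() loads it into the TX CPU scratch area; the
 * 5705 instead uses the smaller TSO5 image further down, running it on
 * its RX CPU.
 */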
6451#define TG3_TSO_FW_RELEASE_MAJOR 0x1
6452#define TG3_TSO_FW_RELASE_MINOR 0x6
6453#define TG3_TSO_FW_RELEASE_FIX 0x0
6454#define TG3_TSO_FW_START_ADDR 0x08000000
6455#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6456#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6457#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6458#define TG3_TSO_FW_RODATA_LEN 0x60
6459#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6460#define TG3_TSO_FW_DATA_LEN 0x30
6461#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6462#define TG3_TSO_FW_SBSS_LEN 0x2c
6463#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6464#define TG3_TSO_FW_BSS_LEN 0x894
6465
Andreas Mohr50da8592006-08-14 23:54:30 -07006466static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006467 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6468 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6469 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6470 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6471 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6472 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6473 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6474 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6475 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6476 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6477 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6478 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6479 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6480 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6481 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6482 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6483 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6484 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6485 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6486 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6487 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6488 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6489 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6490 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6491 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6492 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6493 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6494 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6495 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6496 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6497 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6498 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6499 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6500 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6501 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6502 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6503 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6504 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6505 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6506 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6507 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6508 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6509 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6510 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6511 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6512 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6513 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6514 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6515 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6516 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6517 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6518 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6519 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6520 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6521 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6522 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6523 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6524 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6525 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6526 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6527 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6528 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6529 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6530 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6531 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6532 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6533 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6534 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6535 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6536 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6537 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6538 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6539 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6540 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6541 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6542 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6543 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6544 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6545 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6546 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6547 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6548 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6549 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6550 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6551 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6552 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6553 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6554 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6555 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6556 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6557 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6558 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6559 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6560 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6561 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6562 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6563 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6564 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6565 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6566 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6567 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6568 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6569 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6570 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6571 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6572 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6573 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6574 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6575 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6576 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6577 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6578 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6579 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6580 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6581 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6582 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6583 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6584 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6585 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6586 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6587 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6588 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6589 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6590 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6591 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6592 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6593 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6594 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6595 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6596 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6597 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6598 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6599 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6600 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6601 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6602 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6603 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6604 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6605 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6606 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6607 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6608 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6609 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6610 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6611 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6612 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6613 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6614 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6615 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6616 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6617 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6618 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6619 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6620 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6621 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6622 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6623 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6624 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6625 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6626 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6627 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6628 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6629 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6630 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6631 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6632 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6633 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6634 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6635 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6636 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6637 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6638 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6639 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6640 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6641 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6642 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6643 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6644 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6645 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6646 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6647 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6648 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6649 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6650 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6651 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6652 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6653 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6654 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6655 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6656 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6657 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6658 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6659 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6660 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6661 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6662 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6663 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6664 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6665 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6666 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6667 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6668 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6669 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6670 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6671 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6672 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6673 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6674 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6675 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6676 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6677 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6678 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6679 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6680 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6681 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6682 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6683 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6684 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6685 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6686 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6687 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6688 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6689 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6690 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6691 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6692 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6693 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6694 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6695 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6696 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6697 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6698 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6699 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6700 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6701 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6702 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6703 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6704 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6705 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6706 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6707 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6708 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6709 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6710 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6711 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6712 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6713 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6714 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6715 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6716 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6717 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6718 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6719 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6720 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6721 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6722 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6723 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6724 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6725 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6726 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6727 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6728 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6729 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6730 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6731 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6732 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6733 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6734 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6735 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6736 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6737 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6738 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6739 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6740 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6741 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6742 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6743 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6744 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6745 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6746 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6747 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6748 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6749 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6750 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6751};
6752
Andreas Mohr50da8592006-08-14 23:54:30 -07006753static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006754 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6755 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6756 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6757 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6758 0x00000000,
6759};
6760
Andreas Mohr50da8592006-08-14 23:54:30 -07006761static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006762 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6763 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6764 0x00000000,
6765};
6766
6767/* 5705 needs a special version of the TSO firmware. */
6768#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6769#define TG3_TSO5_FW_RELASE_MINOR 0x2
6770#define TG3_TSO5_FW_RELEASE_FIX 0x0
6771#define TG3_TSO5_FW_START_ADDR 0x00010000
6772#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6773#define TG3_TSO5_FW_TEXT_LEN 0xe90
6774#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6775#define TG3_TSO5_FW_RODATA_LEN 0x50
6776#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6777#define TG3_TSO5_FW_DATA_LEN 0x20
6778#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6779#define TG3_TSO5_FW_SBSS_LEN 0x28
6780#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6781#define TG3_TSO5_FW_BSS_LEN 0x88
6782
Andreas Mohr50da8592006-08-14 23:54:30 -07006783static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006784 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6785 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6786 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6787 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6788 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6789 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6790 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6791 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6792 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6793 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6794 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6795 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6796 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6797 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6798 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6799 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6800 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6801 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6802 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6803 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6804 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6805 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6806 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6807 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6808 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6809 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6810 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6811 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6812 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6813 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6814 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6815 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6816 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6817 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6818 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6819 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6820 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6821 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6822 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6823 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6824 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6825 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6826 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6827 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6828 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6829 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6830 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6831 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6832 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6833 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6834 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6835 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6836 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6837 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6838 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6839 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6840 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6841 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6842 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6843 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6844 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6845 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6846 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6847 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6848 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6849 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6850 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6851 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6852 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6853 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6854 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6855 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6856 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6857 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6858 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6859 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6860 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6861 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6862 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6863 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6864 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6865 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6866 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6867 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6868 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6869 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6870 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6871 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6872 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6873 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6874 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6875 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6876 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6877 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6878 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6879 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6880 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6881 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6882 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6883 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6884 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6885 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6886 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6887 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6888 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6889 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6890 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6891 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6892 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6893 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6894 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6895 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6896 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6897 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6898 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6899 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6900 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6901 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6902 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6903 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6904 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6905 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6906 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6907 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6908 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6909 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6910 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6911 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6912 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6913 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6914 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6915 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6916 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6917 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6918 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6919 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6920 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6921 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6922 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6923 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6924 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6925 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6926 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6927 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6928 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6929 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6930 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6931 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6932 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6933 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6934 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6935 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6936 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6937 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6938 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6939 0x00000000, 0x00000000, 0x00000000,
6940};
6941
Andreas Mohr50da8592006-08-14 23:54:30 -07006942static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006943 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6944 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6945 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6946 0x00000000, 0x00000000, 0x00000000,
6947};
6948
Andreas Mohr50da8592006-08-14 23:54:30 -07006949static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006950 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6951 0x00000000, 0x00000000, 0x00000000,
6952};
6953
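/* Load and start the TSO firmware.  Chips with hardware TSO skip this
 * entirely.  The 5705 gets the TSO5 image on its RX CPU, using the mbuf
 * pool as scratch space; all other chips get the larger image loaded
 * into the TX CPU scratch area.  The CPU's program counter is then
 * pointed at the image's text base and the core is released from halt.
 */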
6954/* tp->lock is held. */
6955static int tg3_load_tso_firmware(struct tg3 *tp)
6956{
6957 struct fw_info info;
6958 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6959 int err, i;
6960
6961 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6962 return 0;
6963
6964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6965 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6966 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6967 info.text_data = &tg3Tso5FwText[0];
6968 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6969 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6970 info.rodata_data = &tg3Tso5FwRodata[0];
6971 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6972 info.data_len = TG3_TSO5_FW_DATA_LEN;
6973 info.data_data = &tg3Tso5FwData[0];
6974 cpu_base = RX_CPU_BASE;
6975 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6976 cpu_scratch_size = (info.text_len +
6977 info.rodata_len +
6978 info.data_len +
6979 TG3_TSO5_FW_SBSS_LEN +
6980 TG3_TSO5_FW_BSS_LEN);
6981 } else {
6982 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6983 info.text_len = TG3_TSO_FW_TEXT_LEN;
6984 info.text_data = &tg3TsoFwText[0];
6985 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6986 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6987 info.rodata_data = &tg3TsoFwRodata[0];
6988 info.data_base = TG3_TSO_FW_DATA_ADDR;
6989 info.data_len = TG3_TSO_FW_DATA_LEN;
6990 info.data_data = &tg3TsoFwData[0];
6991 cpu_base = TX_CPU_BASE;
6992 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6993 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6994 }
6995
6996 err = tg3_load_firmware_cpu(tp, cpu_base,
6997 cpu_scratch_base, cpu_scratch_size,
6998 &info);
6999 if (err)
7000 return err;
7001
7002	/* Now start up the cpu. */
7003 tw32(cpu_base + CPU_STATE, 0xffffffff);
7004 tw32_f(cpu_base + CPU_PC, info.text_base);
7005
7006 for (i = 0; i < 5; i++) {
7007 if (tr32(cpu_base + CPU_PC) == info.text_base)
7008 break;
7009 tw32(cpu_base + CPU_STATE, 0xffffffff);
7010 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7011 tw32_f(cpu_base + CPU_PC, info.text_base);
7012 udelay(1000);
7013 }
7014 if (i >= 5) {
7015 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7016 "to set CPU PC, is %08x should be %08x\n",
7017 tp->dev->name, tr32(cpu_base + CPU_PC),
7018 info.text_base);
7019 return -ENODEV;
7020 }
7021 tw32(cpu_base + CPU_STATE, 0xffffffff);
7022 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7023 return 0;
7024}
7025
Linus Torvalds1da177e2005-04-16 15:20:36 -07007026
Linus Torvalds1da177e2005-04-16 15:20:36 -07007027static int tg3_set_mac_addr(struct net_device *dev, void *p)
7028{
7029 struct tg3 *tp = netdev_priv(dev);
7030 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07007031 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007032
Michael Chanf9804dd2005-09-27 12:13:10 -07007033 if (!is_valid_ether_addr(addr->sa_data))
7034 return -EINVAL;
7035
Linus Torvalds1da177e2005-04-16 15:20:36 -07007036 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7037
Michael Chane75f7c92006-03-20 21:33:26 -08007038 if (!netif_running(dev))
7039 return 0;
7040
Michael Chan58712ef2006-04-29 18:58:01 -07007041 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07007042 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07007043
Michael Chan986e0ae2007-05-05 12:10:20 -07007044 addr0_high = tr32(MAC_ADDR_0_HIGH);
7045 addr0_low = tr32(MAC_ADDR_0_LOW);
7046 addr1_high = tr32(MAC_ADDR_1_HIGH);
7047 addr1_low = tr32(MAC_ADDR_1_LOW);
7048
7049 /* Skip MAC addr 1 if ASF is using it. */
7050 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7051 !(addr1_high == 0 && addr1_low == 0))
7052 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07007053 }
Michael Chan986e0ae2007-05-05 12:10:20 -07007054 spin_lock_bh(&tp->lock);
7055 __tg3_set_mac_addr(tp, skip_mac_1);
7056 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007057
Michael Chanb9ec6c12006-07-25 16:37:27 -07007058 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007059}
7060
7061/* tp->lock is held. */
7062static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7063 dma_addr_t mapping, u32 maxlen_flags,
7064 u32 nic_addr)
7065{
7066 tg3_write_mem(tp,
7067 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7068 ((u64) mapping >> 32));
7069 tg3_write_mem(tp,
7070 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7071 ((u64) mapping & 0xffffffff));
7072 tg3_write_mem(tp,
7073 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7074 maxlen_flags);
7075
7076 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7077 tg3_write_mem(tp,
7078 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7079 nic_addr);
7080}
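/* Editor's sketch (not part of the driver): tg3_set_bdinfo() above fills
 * one TG3_BDINFO slot in NIC SRAM.  Assuming the usual tg3.h offsets
 * (TG3_BDINFO_HOST_ADDR = 0x0, TG3_BDINFO_MAXLEN_FLAGS = 0x8,
 * TG3_BDINFO_NIC_ADDR = 0xc), the words written are laid out roughly as:
 *
 *	struct tg3_bdinfo_sketch {		// hypothetical name
 *		u32 host_addr_high;	// upper 32 bits of the ring's DMA address
 *		u32 host_addr_low;	// lower 32 bits of the ring's DMA address
 *		u32 maxlen_flags;	// (max buffer len << 16) | ring flags
 *		u32 nic_addr;		// descriptor location in NIC SRAM
 *	};
 *
 * The driver itself never uses such a struct; it writes each word with
 * tg3_write_mem() as shown above, and skips nic_addr on 5705-plus chips.
 */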
7081
7082static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07007083static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07007084{
7085 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7086 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7087 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7088 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7089 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7090 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7091 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7092 }
7093 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7094 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7095 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7096 u32 val = ec->stats_block_coalesce_usecs;
7097
7098 if (!netif_carrier_ok(tp->dev))
7099 val = 0;
7100
7101 tw32(HOSTCC_STAT_COAL_TICKS, val);
7102 }
7103}
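/* Editor's sketch (not driver code): the registers programmed above are
 * fed by the ethtool coalescing interface ("ethtool -C").  A hypothetical
 * caller setting only the rx-side knobs might look like:
 *
 *	struct ethtool_coalesce ec = {
 *		.rx_coalesce_usecs       = 20,	// -> HOSTCC_RXCOL_TICKS
 *		.rx_max_coalesced_frames = 5,	// -> HOSTCC_RXMAX_FRAMES
 *	};
 *	__tg3_set_coalesce(tp, &ec);
 *
 * In this driver the real source of the values is tp->coal, passed in by
 * tg3_reset_hw() below; the literal numbers here are examples only.
 */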
Linus Torvalds1da177e2005-04-16 15:20:36 -07007104
7105/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007106static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007107{
7108 u32 val, rdmac_mode;
7109 int i, err, limit;
7110
7111 tg3_disable_ints(tp);
7112
7113 tg3_stop_fw(tp);
7114
7115 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7116
7117 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007118 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007119 }
7120
Matt Carlsondd477002008-05-25 23:45:58 -07007121 if (reset_phy &&
7122 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007123 tg3_phy_reset(tp);
7124
Linus Torvalds1da177e2005-04-16 15:20:36 -07007125 err = tg3_chip_reset(tp);
7126 if (err)
7127 return err;
7128
7129 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7130
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007131 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007132 val = tr32(TG3_CPMU_CTRL);
7133 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7134 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007135
7136 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7137 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7138 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7139 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7140
7141 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7142 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7143 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7144 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7145
7146 val = tr32(TG3_CPMU_HST_ACC);
7147 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7148 val |= CPMU_HST_ACC_MACCLK_6_25;
7149 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007150 }
7151
Linus Torvalds1da177e2005-04-16 15:20:36 -07007152 /* This works around an issue with Athlon chipsets on
7153 * B3 tigon3 silicon. This bit has no effect on any
7154 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007155 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007157 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7158 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7159 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7160 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7161 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007162
7163 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7164 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7165 val = tr32(TG3PCI_PCISTATE);
7166 val |= PCISTATE_RETRY_SAME_DMA;
7167 tw32(TG3PCI_PCISTATE, val);
7168 }
7169
Matt Carlson0d3031d2007-10-10 18:02:43 -07007170 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7171 /* Allow reads and writes to the
7172 * APE register and memory space.
7173 */
7174 val = tr32(TG3PCI_PCISTATE);
7175 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7176 PCISTATE_ALLOW_APE_SHMEM_WR;
7177 tw32(TG3PCI_PCISTATE, val);
7178 }
7179
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7181 /* Enable some hw fixes. */
7182 val = tr32(TG3PCI_MSI_DATA);
7183 val |= (1 << 26) | (1 << 28) | (1 << 29);
7184 tw32(TG3PCI_MSI_DATA, val);
7185 }
7186
7187 /* Descriptor ring init may make accesses to the
 7188	 * NIC SRAM area to set up the TX descriptors, so we
7189 * can only do this after the hardware has been
7190 * successfully reset.
7191 */
Michael Chan32d8c572006-07-25 16:38:29 -07007192 err = tg3_init_rings(tp);
7193 if (err)
7194 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195
Matt Carlson9936bcf2007-10-10 18:03:07 -07007196 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlsonfcb389d2008-11-03 16:55:44 -08007197 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007198 /* This value is determined during the probe time DMA
7199 * engine test, tg3_test_dma.
7200 */
7201 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007203
7204 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7205 GRC_MODE_4X_NIC_SEND_RINGS |
7206 GRC_MODE_NO_TX_PHDR_CSUM |
7207 GRC_MODE_NO_RX_PHDR_CSUM);
7208 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007209
7210 /* Pseudo-header checksum is done by hardware logic and not
7211 * the offload processers, so make the chip do the pseudo-
7212 * header checksums on receive. For transmit it is more
7213 * convenient to do the pseudo-header checksum in software
7214 * as Linux does that on transmit for us in all cases.
7215 */
7216 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217
7218 tw32(GRC_MODE,
7219 tp->grc_mode |
7220 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7221
 7222	/* Setup the timer prescaler register. Clock is always 66 MHz. */
7223 val = tr32(GRC_MISC_CFG);
7224 val &= ~0xff;
7225 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7226 tw32(GRC_MISC_CFG, val);
7227
7228 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007229 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007230 /* Do nothing. */
7231 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7232 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7234 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7235 else
7236 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7237 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7238 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7239 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007240 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7241 int fw_len;
7242
7243 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7244 TG3_TSO5_FW_RODATA_LEN +
7245 TG3_TSO5_FW_DATA_LEN +
7246 TG3_TSO5_FW_SBSS_LEN +
7247 TG3_TSO5_FW_BSS_LEN);
7248 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7249 tw32(BUFMGR_MB_POOL_ADDR,
7250 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7251 tw32(BUFMGR_MB_POOL_SIZE,
7252 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254
Michael Chan0f893dc2005-07-25 12:30:38 -07007255 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007256 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7257 tp->bufmgr_config.mbuf_read_dma_low_water);
7258 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7259 tp->bufmgr_config.mbuf_mac_rx_low_water);
7260 tw32(BUFMGR_MB_HIGH_WATER,
7261 tp->bufmgr_config.mbuf_high_water);
7262 } else {
7263 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7264 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7265 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7266 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7267 tw32(BUFMGR_MB_HIGH_WATER,
7268 tp->bufmgr_config.mbuf_high_water_jumbo);
7269 }
7270 tw32(BUFMGR_DMA_LOW_WATER,
7271 tp->bufmgr_config.dma_low_water);
7272 tw32(BUFMGR_DMA_HIGH_WATER,
7273 tp->bufmgr_config.dma_high_water);
7274
7275 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7276 for (i = 0; i < 2000; i++) {
7277 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7278 break;
7279 udelay(10);
7280 }
7281 if (i >= 2000) {
7282 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7283 tp->dev->name);
7284 return -ENODEV;
7285 }
7286
7287 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007288 val = tp->rx_pending / 8;
7289 if (val == 0)
7290 val = 1;
7291 else if (val > tp->rx_std_max_post)
7292 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007293 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7294 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7295 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7296
7297 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7298 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7299 }
Michael Chanf92905d2006-06-29 20:14:29 -07007300
7301 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007302
7303 /* Initialize TG3_BDINFO's at:
7304 * RCVDBDI_STD_BD: standard eth size rx ring
7305 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7306 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7307 *
7308 * like so:
7309 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7310 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7311 * ring attribute flags
7312 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7313 *
7314 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7315 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7316 *
7317 * The size of each ring is fixed in the firmware, but the location is
7318 * configurable.
7319 */
7320 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7321 ((u64) tp->rx_std_mapping >> 32));
7322 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7323 ((u64) tp->rx_std_mapping & 0xffffffff));
7324 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7325 NIC_SRAM_RX_BUFFER_DESC);
7326
7327 /* Don't even try to program the JUMBO/MINI buffer descriptor
7328 * configs on 5705.
7329 */
7330 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7331 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7332 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7333 } else {
7334 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7335 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7336
7337 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7338 BDINFO_FLAGS_DISABLED);
7339
7340 /* Setup replenish threshold. */
7341 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7342
Michael Chan0f893dc2005-07-25 12:30:38 -07007343 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007344 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7345 ((u64) tp->rx_jumbo_mapping >> 32));
7346 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7347 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7348 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7349 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7350 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7351 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7352 } else {
7353 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7354 BDINFO_FLAGS_DISABLED);
7355 }
7356
7357 }
7358
7359 /* There is only one send ring on 5705/5750, no need to explicitly
7360 * disable the others.
7361 */
7362 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7363 /* Clear out send RCB ring in SRAM. */
7364 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7365 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7366 BDINFO_FLAGS_DISABLED);
7367 }
7368
7369 tp->tx_prod = 0;
7370 tp->tx_cons = 0;
7371 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7372 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7373
7374 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7375 tp->tx_desc_mapping,
7376 (TG3_TX_RING_SIZE <<
7377 BDINFO_FLAGS_MAXLEN_SHIFT),
7378 NIC_SRAM_TX_BUFFER_DESC);
7379
7380 /* There is only one receive return ring on 5705/5750, no need
7381 * to explicitly disable the others.
7382 */
7383 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7384 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7385 i += TG3_BDINFO_SIZE) {
7386 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7387 BDINFO_FLAGS_DISABLED);
7388 }
7389 }
7390
7391 tp->rx_rcb_ptr = 0;
7392 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7393
7394 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7395 tp->rx_rcb_mapping,
7396 (TG3_RX_RCB_RING_SIZE(tp) <<
7397 BDINFO_FLAGS_MAXLEN_SHIFT),
7398 0);
7399
7400 tp->rx_std_ptr = tp->rx_pending;
7401 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7402 tp->rx_std_ptr);
7403
Michael Chan0f893dc2005-07-25 12:30:38 -07007404 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007405 tp->rx_jumbo_pending : 0;
7406 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7407 tp->rx_jumbo_ptr);
7408
7409 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007410 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007411
7412 /* MTU + ethernet header + FCS + optional VLAN tag */
7413 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7414
7415 /* The slot time is changed by tg3_setup_phy if we
7416 * run at gigabit with half duplex.
7417 */
7418 tw32(MAC_TX_LENGTHS,
7419 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7420 (6 << TX_LENGTHS_IPG_SHIFT) |
7421 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7422
7423 /* Receive rules. */
7424 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7425 tw32(RCVLPC_CONFIG, 0x0181);
7426
7427 /* Calculate RDMAC_MODE setting early, we need it to determine
7428 * the RCVLPC_STATE_ENABLE mask.
7429 */
7430 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7431 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7432 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7433 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7434 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007435
Matt Carlson57e69832008-05-25 23:48:31 -07007436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7437 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007438 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7439 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7440 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7441
Michael Chan85e94ce2005-04-21 17:05:28 -07007442 /* If statement applies to 5705 and 5750 PCI devices only */
7443 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7444 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7445 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007446 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007448 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7449 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7450 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7451 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7452 }
7453 }
7454
Michael Chan85e94ce2005-04-21 17:05:28 -07007455 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7456 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7457
Linus Torvalds1da177e2005-04-16 15:20:36 -07007458 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7459 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007460
7461 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007462 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7463 val = tr32(RCVLPC_STATS_ENABLE);
7464 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7465 tw32(RCVLPC_STATS_ENABLE, val);
7466 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7467 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007468 val = tr32(RCVLPC_STATS_ENABLE);
7469 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7470 tw32(RCVLPC_STATS_ENABLE, val);
7471 } else {
7472 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7473 }
7474 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7475 tw32(SNDDATAI_STATSENAB, 0xffffff);
7476 tw32(SNDDATAI_STATSCTRL,
7477 (SNDDATAI_SCTRL_ENABLE |
7478 SNDDATAI_SCTRL_FASTUPD));
7479
7480 /* Setup host coalescing engine. */
7481 tw32(HOSTCC_MODE, 0);
7482 for (i = 0; i < 2000; i++) {
7483 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7484 break;
7485 udelay(10);
7486 }
7487
Michael Chand244c892005-07-05 14:42:33 -07007488 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007489
7490 /* set status block DMA address */
7491 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7492 ((u64) tp->status_mapping >> 32));
7493 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7494 ((u64) tp->status_mapping & 0xffffffff));
7495
7496 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7497 /* Status/statistics block address. See tg3_timer,
7498 * the tg3_periodic_fetch_stats call there, and
7499 * tg3_get_stats to see how this works for 5705/5750 chips.
7500 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007501 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7502 ((u64) tp->stats_mapping >> 32));
7503 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7504 ((u64) tp->stats_mapping & 0xffffffff));
7505 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7506 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7507 }
7508
7509 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7510
7511 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7512 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7513 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7514 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7515
7516 /* Clear statistics/status block in chip, and status block in ram. */
7517 for (i = NIC_SRAM_STATS_BLK;
7518 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7519 i += sizeof(u32)) {
7520 tg3_write_mem(tp, i, 0);
7521 udelay(40);
7522 }
7523 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7524
Michael Chanc94e3942005-09-27 12:12:42 -07007525 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7526 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7527 /* reset to prevent losing 1st rx packet intermittently */
7528 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7529 udelay(10);
7530 }
7531
Matt Carlson3bda1252008-08-15 14:08:22 -07007532 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7533 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7534 else
7535 tp->mac_mode = 0;
7536 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007537 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007538 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7539 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7540 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7541 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7543 udelay(40);
7544
Michael Chan314fba32005-04-21 17:07:04 -07007545 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007546 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007547 * register to preserve the GPIO settings for LOMs. The GPIOs,
7548 * whether used as inputs or outputs, are set by boot code after
7549 * reset.
7550 */
Michael Chan9d26e212006-12-07 00:21:14 -08007551 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007552 u32 gpio_mask;
7553
Michael Chan9d26e212006-12-07 00:21:14 -08007554 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7555 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7556 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007557
7558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7559 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7560 GRC_LCLCTRL_GPIO_OUTPUT3;
7561
Michael Chanaf36e6b2006-03-23 01:28:06 -08007562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7563 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7564
Gary Zambranoaaf84462007-05-05 11:51:45 -07007565 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007566 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7567
7568 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007569 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7570 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7571 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007572 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7574 udelay(100);
7575
Michael Chan09ee9292005-08-09 20:17:00 -07007576 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007577 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578
7579 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7580 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7581 udelay(40);
7582 }
7583
7584 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7585 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7586 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7587 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7588 WDMAC_MODE_LNGREAD_ENAB);
7589
Michael Chan85e94ce2005-04-21 17:05:28 -07007590 /* If statement applies to 5705 and 5750 PCI devices only */
7591 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7592 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7595 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7596 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7597 /* nothing */
7598 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7599 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7600 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7601 val |= WDMAC_MODE_RX_ACCEL;
7602 }
7603 }
7604
Michael Chand9ab5ad2006-03-20 22:27:35 -08007605 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007606 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007607 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007608 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007609 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7610 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007611 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007612
Linus Torvalds1da177e2005-04-16 15:20:36 -07007613 tw32_f(WDMAC_MODE, val);
7614 udelay(40);
7615
Matt Carlson9974a352007-10-07 23:27:28 -07007616 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7617 u16 pcix_cmd;
7618
7619 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7620 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007622 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7623 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007624 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007625 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7626 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007627 }
Matt Carlson9974a352007-10-07 23:27:28 -07007628 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7629 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007630 }
7631
7632 tw32_f(RDMAC_MODE, rdmac_mode);
7633 udelay(40);
7634
7635 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7636 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7637 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007638
7639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7640 tw32(SNDDATAC_MODE,
7641 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7642 else
7643 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7644
Linus Torvalds1da177e2005-04-16 15:20:36 -07007645 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7646 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7647 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7648 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007649 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7650 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007651 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7652 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7653
7654 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7655 err = tg3_load_5701_a0_firmware_fix(tp);
7656 if (err)
7657 return err;
7658 }
7659
Linus Torvalds1da177e2005-04-16 15:20:36 -07007660 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7661 err = tg3_load_tso_firmware(tp);
7662 if (err)
7663 return err;
7664 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007665
7666 tp->tx_mode = TX_MODE_ENABLE;
7667 tw32_f(MAC_TX_MODE, tp->tx_mode);
7668 udelay(100);
7669
7670 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007671 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007672 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007675 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7676
Linus Torvalds1da177e2005-04-16 15:20:36 -07007677 tw32_f(MAC_RX_MODE, tp->rx_mode);
7678 udelay(10);
7679
Linus Torvalds1da177e2005-04-16 15:20:36 -07007680 tw32(MAC_LED_CTRL, tp->led_ctrl);
7681
7682 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007683 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007684 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7685 udelay(10);
7686 }
7687 tw32_f(MAC_RX_MODE, tp->rx_mode);
7688 udelay(10);
7689
7690 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7691 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7692 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7693 /* Set drive transmission level to 1.2V */
7694 /* only if the signal pre-emphasis bit is not set */
7695 val = tr32(MAC_SERDES_CFG);
7696 val &= 0xfffff000;
7697 val |= 0x880;
7698 tw32(MAC_SERDES_CFG, val);
7699 }
7700 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7701 tw32(MAC_SERDES_CFG, 0x616000);
7702 }
7703
7704 /* Prevent chip from dropping frames when flow control
7705 * is enabled.
7706 */
7707 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7708
7709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7710 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7711 /* Use hardware link auto-negotiation */
7712 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7713 }
7714
Michael Chand4d2c552006-03-20 17:47:20 -08007715 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7716 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7717 u32 tmp;
7718
7719 tmp = tr32(SERDES_RX_CTRL);
7720 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7721 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7722 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7723 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7724 }
7725
Matt Carlsondd477002008-05-25 23:45:58 -07007726 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7727 if (tp->link_config.phy_is_low_power) {
7728 tp->link_config.phy_is_low_power = 0;
7729 tp->link_config.speed = tp->link_config.orig_speed;
7730 tp->link_config.duplex = tp->link_config.orig_duplex;
7731 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7732 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007733
Matt Carlsondd477002008-05-25 23:45:58 -07007734 err = tg3_setup_phy(tp, 0);
7735 if (err)
7736 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007737
Matt Carlsondd477002008-05-25 23:45:58 -07007738 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7739 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7740 u32 tmp;
7741
7742 /* Clear CRC stats. */
7743 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7744 tg3_writephy(tp, MII_TG3_TEST1,
7745 tmp | MII_TG3_TEST1_CRC_EN);
7746 tg3_readphy(tp, 0x14, &tmp);
7747 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007748 }
7749 }
7750
7751 __tg3_set_rx_mode(tp->dev);
7752
7753 /* Initialize receive rules. */
7754 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7755 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7756 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7757 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7758
Michael Chan4cf78e42005-07-25 12:29:19 -07007759 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007760 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007761 limit = 8;
7762 else
7763 limit = 16;
7764 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7765 limit -= 4;
7766 switch (limit) {
7767 case 16:
7768 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7769 case 15:
7770 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7771 case 14:
7772 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7773 case 13:
7774 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7775 case 12:
7776 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7777 case 11:
7778 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7779 case 10:
7780 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7781 case 9:
7782 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7783 case 8:
7784 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7785 case 7:
7786 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7787 case 6:
7788 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7789 case 5:
7790 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7791 case 4:
7792 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7793 case 3:
7794 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7795 case 2:
7796 case 1:
7797
7798 default:
7799 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007801
Matt Carlson9ce768e2007-10-11 19:49:11 -07007802 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7803 /* Write our heartbeat update interval to APE. */
7804 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7805 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007806
Linus Torvalds1da177e2005-04-16 15:20:36 -07007807 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7808
Linus Torvalds1da177e2005-04-16 15:20:36 -07007809 return 0;
7810}
7811
7812/* Called at device open time to get the chip ready for
7813 * packet processing. Invoked with tp->lock held.
7814 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007815static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007817 tg3_switch_clocks(tp);
7818
7819 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7820
Matt Carlson2f751b62008-08-04 23:17:34 -07007821 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007822}
7823
7824#define TG3_STAT_ADD32(PSTAT, REG) \
7825do { u32 __val = tr32(REG); \
7826 (PSTAT)->low += __val; \
7827 if ((PSTAT)->low < __val) \
7828 (PSTAT)->high += 1; \
7829} while (0)
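/* Editor's worked example (illustrative only): TG3_STAT_ADD32() folds a
 * 32-bit hardware counter delta into the 64-bit {high,low} pair kept in
 * the stats block.  Suppose (PSTAT)->low == 0xfffffff0 and __val == 0x20:
 *
 *	low += 0x20;		// u32 arithmetic wraps: low becomes 0x00000010
 *	if (low < __val)	// 0x10 < 0x20, so the addition overflowed
 *		high += 1;	// propagate the carry into the high word
 *
 * The wrap test is valid because unsigned overflow is well defined for
 * u32; no extra state is needed to detect the carry.
 */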
7830
7831static void tg3_periodic_fetch_stats(struct tg3 *tp)
7832{
7833 struct tg3_hw_stats *sp = tp->hw_stats;
7834
7835 if (!netif_carrier_ok(tp->dev))
7836 return;
7837
7838 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7839 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7840 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7841 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7842 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7843 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7844 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7845 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7846 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7847 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7848 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7849 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7850 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7851
7852 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7853 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7854 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7855 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7856 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7857 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7858 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7859 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7860 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7861 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7862 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7863 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7864 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7865 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007866
7867 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7868 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7869 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007870}
7871
7872static void tg3_timer(unsigned long __opaque)
7873{
7874 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007875
Michael Chanf475f162006-03-27 23:20:14 -08007876 if (tp->irq_sync)
7877 goto restart_timer;
7878
David S. Millerf47c11e2005-06-24 20:18:35 -07007879 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007880
David S. Millerfac9b832005-05-18 22:46:34 -07007881 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 7882		/* All of this garbage is because, when using non-tagged
 7883		 * IRQ status, the mailbox/status_block protocol the chip
 7884		 * uses with the cpu is race-prone.
7885 */
7886 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7887 tw32(GRC_LOCAL_CTRL,
7888 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7889 } else {
7890 tw32(HOSTCC_MODE, tp->coalesce_mode |
7891 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7892 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007893
David S. Millerfac9b832005-05-18 22:46:34 -07007894 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7895 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007896 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007897 schedule_work(&tp->reset_task);
7898 return;
7899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900 }
7901
Linus Torvalds1da177e2005-04-16 15:20:36 -07007902 /* This part only runs once per second. */
7903 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007904 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7905 tg3_periodic_fetch_stats(tp);
7906
Linus Torvalds1da177e2005-04-16 15:20:36 -07007907 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7908 u32 mac_stat;
7909 int phy_event;
7910
7911 mac_stat = tr32(MAC_STATUS);
7912
7913 phy_event = 0;
7914 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7915 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7916 phy_event = 1;
7917 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7918 phy_event = 1;
7919
7920 if (phy_event)
7921 tg3_setup_phy(tp, 0);
7922 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7923 u32 mac_stat = tr32(MAC_STATUS);
7924 int need_setup = 0;
7925
7926 if (netif_carrier_ok(tp->dev) &&
7927 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7928 need_setup = 1;
7929 }
 7930		if (!netif_carrier_ok(tp->dev) &&
7931 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7932 MAC_STATUS_SIGNAL_DET))) {
7933 need_setup = 1;
7934 }
7935 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007936 if (!tp->serdes_counter) {
7937 tw32_f(MAC_MODE,
7938 (tp->mac_mode &
7939 ~MAC_MODE_PORT_MODE_MASK));
7940 udelay(40);
7941 tw32_f(MAC_MODE, tp->mac_mode);
7942 udelay(40);
7943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007944 tg3_setup_phy(tp, 0);
7945 }
Michael Chan747e8f82005-07-25 12:33:22 -07007946 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7947 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007948
7949 tp->timer_counter = tp->timer_multiplier;
7950 }
7951
Michael Chan130b8e42006-09-27 16:00:40 -07007952 /* Heartbeat is only sent once every 2 seconds.
7953 *
7954 * The heartbeat is to tell the ASF firmware that the host
7955 * driver is still alive. In the event that the OS crashes,
7956 * ASF needs to reset the hardware to free up the FIFO space
7957 * that may be filled with rx packets destined for the host.
7958 * If the FIFO is full, ASF will no longer function properly.
7959 *
7960 * Unintended resets have been reported on real time kernels
7961 * where the timer doesn't run on time. Netpoll will also have
 7962	 * the same problem.
7963 *
7964 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7965 * to check the ring condition when the heartbeat is expiring
7966 * before doing the reset. This will prevent most unintended
7967 * resets.
7968 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007969 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007970 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7971 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007972 tg3_wait_for_event_ack(tp);
7973
Michael Chanbbadf502006-04-06 21:46:34 -07007974 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007975 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007976 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007977 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007978 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007979
7980 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007981 }
7982 tp->asf_counter = tp->asf_multiplier;
7983 }
7984
David S. Millerf47c11e2005-06-24 20:18:35 -07007985 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007986
Michael Chanf475f162006-03-27 23:20:14 -08007987restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007988 tp->timer.expires = jiffies + tp->timer_offset;
7989 add_timer(&tp->timer);
7990}
7991
Adrian Bunk81789ef2006-03-20 23:00:14 -08007992static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007993{
David Howells7d12e782006-10-05 14:55:46 +01007994 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007995 unsigned long flags;
7996 struct net_device *dev = tp->dev;
7997
7998 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7999 fn = tg3_msi;
8000 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8001 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008002 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008003 } else {
8004 fn = tg3_interrupt;
8005 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8006 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008007 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008008 }
 8009	return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
8010}
8011
Michael Chan79381092005-04-21 17:13:59 -07008012static int tg3_test_interrupt(struct tg3 *tp)
8013{
8014 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07008015 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07008016
Michael Chand4bc3922005-05-29 14:59:20 -07008017 if (!netif_running(dev))
8018 return -ENODEV;
8019
Michael Chan79381092005-04-21 17:13:59 -07008020 tg3_disable_ints(tp);
8021
8022 free_irq(tp->pdev->irq, dev);
8023
8024 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008025 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07008026 if (err)
8027 return err;
8028
Michael Chan38f38432005-09-05 17:53:32 -07008029 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07008030 tg3_enable_ints(tp);
8031
8032 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8033 HOSTCC_MODE_NOW);
8034
8035 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07008036 u32 int_mbox, misc_host_ctrl;
8037
Michael Chan09ee9292005-08-09 20:17:00 -07008038 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8039 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07008040 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8041
8042 if ((int_mbox != 0) ||
8043 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8044 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07008045 break;
Michael Chanb16250e2006-09-27 16:10:14 -07008046 }
8047
Michael Chan79381092005-04-21 17:13:59 -07008048 msleep(10);
8049 }
8050
8051 tg3_disable_ints(tp);
8052
8053 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008054
Michael Chanfcfa0a32006-03-20 22:28:41 -08008055 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008056
8057 if (err)
8058 return err;
8059
Michael Chanb16250e2006-09-27 16:10:14 -07008060 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07008061 return 0;
8062
8063 return -EIO;
8064}
8065
8066/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
 8067 * mode is successfully restored.
8068 */
8069static int tg3_test_msi(struct tg3 *tp)
8070{
8071 struct net_device *dev = tp->dev;
8072 int err;
8073 u16 pci_cmd;
8074
8075 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8076 return 0;
8077
8078 /* Turn off SERR reporting in case MSI terminates with Master
8079 * Abort.
8080 */
8081 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8082 pci_write_config_word(tp->pdev, PCI_COMMAND,
8083 pci_cmd & ~PCI_COMMAND_SERR);
8084
8085 err = tg3_test_interrupt(tp);
8086
8087 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8088
8089 if (!err)
8090 return 0;
8091
8092 /* other failures */
8093 if (err != -EIO)
8094 return err;
8095
8096 /* MSI test failed, go back to INTx mode */
8097 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8098 "switching to INTx mode. Please report this failure to "
8099 "the PCI maintainer and include system chipset information.\n",
8100 tp->dev->name);
8101
8102 free_irq(tp->pdev->irq, dev);
8103 pci_disable_msi(tp->pdev);
8104
8105 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8106
Michael Chanfcfa0a32006-03-20 22:28:41 -08008107 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008108 if (err)
8109 return err;
8110
8111 /* Need to reset the chip because the MSI cycle may have terminated
8112 * with Master Abort.
8113 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008114 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008115
Michael Chan944d9802005-05-29 14:57:48 -07008116 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008117 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008118
David S. Millerf47c11e2005-06-24 20:18:35 -07008119 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008120
8121 if (err)
8122 free_irq(tp->pdev->irq, dev);
8123
8124 return err;
8125}
8126
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127static int tg3_open(struct net_device *dev)
8128{
8129 struct tg3 *tp = netdev_priv(dev);
8130 int err;
8131
Michael Chanc49a1562006-12-17 17:07:29 -08008132 netif_carrier_off(tp->dev);
8133
Michael Chanbc1c7562006-03-20 17:48:03 -08008134 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008135 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008136 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008137
8138 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008139
Linus Torvalds1da177e2005-04-16 15:20:36 -07008140 tg3_disable_ints(tp);
8141 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8142
David S. Millerf47c11e2005-06-24 20:18:35 -07008143 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008144
8145 /* The placement of this call is tied
8146 * to the setup and use of Host TX descriptors.
8147 */
8148 err = tg3_alloc_consistent(tp);
8149 if (err)
8150 return err;
8151
Michael Chan7544b092007-05-05 13:08:32 -07008152 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008153 /* All MSI supporting chips should support tagged
8154 * status. Assert that this is the case.
8155 */
8156 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8157 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8158 "Not using MSI.\n", tp->dev->name);
8159 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008160 u32 msi_mode;
8161
8162 msi_mode = tr32(MSGINT_MODE);
8163 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8164 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8165 }
8166 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008167 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008168
8169 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008170 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8171 pci_disable_msi(tp->pdev);
8172 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8173 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008174 tg3_free_consistent(tp);
8175 return err;
8176 }
8177
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008178 napi_enable(&tp->napi);
8179
David S. Millerf47c11e2005-06-24 20:18:35 -07008180 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008181
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008182 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008183 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008184 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008185 tg3_free_rings(tp);
8186 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008187 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8188 tp->timer_offset = HZ;
8189 else
8190 tp->timer_offset = HZ / 10;
8191
8192 BUG_ON(tp->timer_offset > HZ);
8193 tp->timer_counter = tp->timer_multiplier =
8194 (HZ / tp->timer_offset);
8195 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008196 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008197
8198 init_timer(&tp->timer);
8199 tp->timer.expires = jiffies + tp->timer_offset;
8200 tp->timer.data = (unsigned long) tp;
8201 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008202 }
8203
David S. Millerf47c11e2005-06-24 20:18:35 -07008204 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008205
8206 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008207 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008208 free_irq(tp->pdev->irq, dev);
8209 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8210 pci_disable_msi(tp->pdev);
8211 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8212 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008213 tg3_free_consistent(tp);
8214 return err;
8215 }
8216
Michael Chan79381092005-04-21 17:13:59 -07008217 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8218 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008219
Michael Chan79381092005-04-21 17:13:59 -07008220 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008221 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008222
8223 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8224 pci_disable_msi(tp->pdev);
8225 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8226 }
Michael Chan944d9802005-05-29 14:57:48 -07008227 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008228 tg3_free_rings(tp);
8229 tg3_free_consistent(tp);
8230
David S. Millerf47c11e2005-06-24 20:18:35 -07008231 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008232
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008233 napi_disable(&tp->napi);
8234
Michael Chan79381092005-04-21 17:13:59 -07008235 return err;
8236 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008237
8238 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8239 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008240 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008241
Michael Chanb5d37722006-09-27 16:06:21 -07008242 tw32(PCIE_TRANSACTION_CFG,
8243 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008244 }
8245 }
Michael Chan79381092005-04-21 17:13:59 -07008246 }
8247
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008248 tg3_phy_start(tp);
8249
David S. Millerf47c11e2005-06-24 20:18:35 -07008250 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008251
Michael Chan79381092005-04-21 17:13:59 -07008252 add_timer(&tp->timer);
8253 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008254 tg3_enable_ints(tp);
8255
David S. Millerf47c11e2005-06-24 20:18:35 -07008256 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008257
8258 netif_start_queue(dev);
8259
8260 return 0;
8261}
8262
8263#if 0
8264/*static*/ void tg3_dump_state(struct tg3 *tp)
8265{
8266 u32 val32, val32_2, val32_3, val32_4, val32_5;
8267 u16 val16;
8268 int i;
8269
8270 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8271 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8272 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8273 val16, val32);
8274
8275 /* MAC block */
8276 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8277 tr32(MAC_MODE), tr32(MAC_STATUS));
8278 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8279 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8280 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8281 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8282 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8283 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8284
8285 /* Send data initiator control block */
8286 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8287 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8288 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8289 tr32(SNDDATAI_STATSCTRL));
8290
8291 /* Send data completion control block */
8292 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8293
8294 /* Send BD ring selector block */
8295 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8296 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8297
8298 /* Send BD initiator control block */
8299 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8300 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8301
8302 /* Send BD completion control block */
8303 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8304
8305 /* Receive list placement control block */
8306 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8307 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8308 printk(" RCVLPC_STATSCTRL[%08x]\n",
8309 tr32(RCVLPC_STATSCTRL));
8310
8311 /* Receive data and receive BD initiator control block */
8312 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8313 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8314
8315 /* Receive data completion control block */
8316 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8317 tr32(RCVDCC_MODE));
8318
8319 /* Receive BD initiator control block */
8320 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8321 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8322
8323 /* Receive BD completion control block */
8324 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8325 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8326
8327 /* Receive list selector control block */
8328 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8329 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8330
8331 /* Mbuf cluster free block */
8332 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8333 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8334
8335 /* Host coalescing control block */
8336 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8337 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8338 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8339 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8340 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8341 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8342 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8343 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8344 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8345 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8346 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8347 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8348
8349 /* Memory arbiter control block */
8350 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8351 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8352
8353 /* Buffer manager control block */
8354 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8355 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8356 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8357 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8358 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8359 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8360 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8361 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8362
8363 /* Read DMA control block */
8364 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8365 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8366
8367 /* Write DMA control block */
8368 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8369 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8370
8371 /* DMA completion block */
8372 printk("DEBUG: DMAC_MODE[%08x]\n",
8373 tr32(DMAC_MODE));
8374
8375 /* GRC block */
8376 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8377 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8378 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8379 tr32(GRC_LOCAL_CTRL));
8380
8381 /* TG3_BDINFOs */
8382 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8383 tr32(RCVDBDI_JUMBO_BD + 0x0),
8384 tr32(RCVDBDI_JUMBO_BD + 0x4),
8385 tr32(RCVDBDI_JUMBO_BD + 0x8),
8386 tr32(RCVDBDI_JUMBO_BD + 0xc));
8387 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8388 tr32(RCVDBDI_STD_BD + 0x0),
8389 tr32(RCVDBDI_STD_BD + 0x4),
8390 tr32(RCVDBDI_STD_BD + 0x8),
8391 tr32(RCVDBDI_STD_BD + 0xc));
8392 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8393 tr32(RCVDBDI_MINI_BD + 0x0),
8394 tr32(RCVDBDI_MINI_BD + 0x4),
8395 tr32(RCVDBDI_MINI_BD + 0x8),
8396 tr32(RCVDBDI_MINI_BD + 0xc));
8397
8398 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8399 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8400 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8401 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8402 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8403 val32, val32_2, val32_3, val32_4);
8404
8405 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8406 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8407 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8408 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8409 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8410 val32, val32_2, val32_3, val32_4);
8411
8412 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8413 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8414 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8415 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8416 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8417 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8418 val32, val32_2, val32_3, val32_4, val32_5);
8419
8420 /* SW status block */
8421 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8422 tp->hw_status->status,
8423 tp->hw_status->status_tag,
8424 tp->hw_status->rx_jumbo_consumer,
8425 tp->hw_status->rx_consumer,
8426 tp->hw_status->rx_mini_consumer,
8427 tp->hw_status->idx[0].rx_producer,
8428 tp->hw_status->idx[0].tx_consumer);
8429
8430 /* SW statistics block */
8431 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8432 ((u32 *)tp->hw_stats)[0],
8433 ((u32 *)tp->hw_stats)[1],
8434 ((u32 *)tp->hw_stats)[2],
8435 ((u32 *)tp->hw_stats)[3]);
8436
8437 /* Mailboxes */
8438 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008439 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8440 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8441 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8442 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008443
8444 /* NIC side send descriptors. */
8445 for (i = 0; i < 6; i++) {
8446 unsigned long txd;
8447
8448 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8449 + (i * sizeof(struct tg3_tx_buffer_desc));
8450 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8451 i,
8452 readl(txd + 0x0), readl(txd + 0x4),
8453 readl(txd + 0x8), readl(txd + 0xc));
8454 }
8455
8456 /* NIC side RX descriptors. */
8457 for (i = 0; i < 6; i++) {
8458 unsigned long rxd;
8459
8460 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8461 + (i * sizeof(struct tg3_rx_buffer_desc));
8462 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8463 i,
8464 readl(rxd + 0x0), readl(rxd + 0x4),
8465 readl(rxd + 0x8), readl(rxd + 0xc));
8466 rxd += (4 * sizeof(u32));
8467 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8468 i,
8469 readl(rxd + 0x0), readl(rxd + 0x4),
8470 readl(rxd + 0x8), readl(rxd + 0xc));
8471 }
8472
8473 for (i = 0; i < 6; i++) {
8474 unsigned long rxd;
8475
8476 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8477 + (i * sizeof(struct tg3_rx_buffer_desc));
8478 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8479 i,
8480 readl(rxd + 0x0), readl(rxd + 0x4),
8481 readl(rxd + 0x8), readl(rxd + 0xc));
8482 rxd += (4 * sizeof(u32));
8483 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8484 i,
8485 readl(rxd + 0x0), readl(rxd + 0x4),
8486 readl(rxd + 0x8), readl(rxd + 0xc));
8487 }
8488}
8489#endif
8490
8491static struct net_device_stats *tg3_get_stats(struct net_device *);
8492static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8493
8494static int tg3_close(struct net_device *dev)
8495{
8496 struct tg3 *tp = netdev_priv(dev);
8497
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008498 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008499 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008500
Linus Torvalds1da177e2005-04-16 15:20:36 -07008501 netif_stop_queue(dev);
8502
8503 del_timer_sync(&tp->timer);
8504
David S. Millerf47c11e2005-06-24 20:18:35 -07008505 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008506#if 0
8507 tg3_dump_state(tp);
8508#endif
8509
8510 tg3_disable_ints(tp);
8511
Michael Chan944d9802005-05-29 14:57:48 -07008512 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008513 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008514 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008515
David S. Millerf47c11e2005-06-24 20:18:35 -07008516 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008517
Michael Chan88b06bc2005-04-21 17:13:25 -07008518 free_irq(tp->pdev->irq, dev);
8519 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8520 pci_disable_msi(tp->pdev);
8521 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008523
8524 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8525 sizeof(tp->net_stats_prev));
8526 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8527 sizeof(tp->estats_prev));
8528
8529 tg3_free_consistent(tp);
8530
Michael Chanbc1c7562006-03-20 17:48:03 -08008531 tg3_set_power_state(tp, PCI_D3hot);
8532
8533 netif_carrier_off(tp->dev);
8534
Linus Torvalds1da177e2005-04-16 15:20:36 -07008535 return 0;
8536}
8537
8538static inline unsigned long get_stat64(tg3_stat64_t *val)
8539{
8540 unsigned long ret;
8541
8542#if (BITS_PER_LONG == 32)
8543 ret = val->low;
8544#else
8545 ret = ((u64)val->high << 32) | ((u64)val->low);
8546#endif
8547 return ret;
8548}
8549
Stefan Buehler816f8b82008-08-15 14:10:54 -07008550static inline u64 get_estat64(tg3_stat64_t *val)
8551{
8552 return ((u64)val->high << 32) | ((u64)val->low);
8553}
8554
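/*
 * On 5700/5701 copper parts the CRC error count is read from the PHY
 * (TEST1 CRC_EN plus PHY register 0x14) and accumulated in
 * phy_crc_errors; all other configurations use the rx_fcs_errors
 * hardware statistic.
 */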
Linus Torvalds1da177e2005-04-16 15:20:36 -07008555static unsigned long calc_crc_errors(struct tg3 *tp)
8556{
8557 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8558
8559 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8560 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8561 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008562 u32 val;
8563
David S. Millerf47c11e2005-06-24 20:18:35 -07008564 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008565 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8566 tg3_writephy(tp, MII_TG3_TEST1,
8567 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008568 tg3_readphy(tp, 0x14, &val);
8569 } else
8570 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008571 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008572
8573 tp->phy_crc_errors += val;
8574
8575 return tp->phy_crc_errors;
8576 }
8577
8578 return get_stat64(&hw_stats->rx_fcs_errors);
8579}
8580
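/*
 * Ethtool stats are reported as the snapshot saved at tg3_close()
 * (estats_prev) plus whatever the hardware has counted since the
 * device was last opened, so the totals keep growing across
 * down/up cycles.
 */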
8581#define ESTAT_ADD(member) \
8582 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008583 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008584
8585static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8586{
8587 struct tg3_ethtool_stats *estats = &tp->estats;
8588 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8589 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8590
8591 if (!hw_stats)
8592 return old_estats;
8593
8594 ESTAT_ADD(rx_octets);
8595 ESTAT_ADD(rx_fragments);
8596 ESTAT_ADD(rx_ucast_packets);
8597 ESTAT_ADD(rx_mcast_packets);
8598 ESTAT_ADD(rx_bcast_packets);
8599 ESTAT_ADD(rx_fcs_errors);
8600 ESTAT_ADD(rx_align_errors);
8601 ESTAT_ADD(rx_xon_pause_rcvd);
8602 ESTAT_ADD(rx_xoff_pause_rcvd);
8603 ESTAT_ADD(rx_mac_ctrl_rcvd);
8604 ESTAT_ADD(rx_xoff_entered);
8605 ESTAT_ADD(rx_frame_too_long_errors);
8606 ESTAT_ADD(rx_jabbers);
8607 ESTAT_ADD(rx_undersize_packets);
8608 ESTAT_ADD(rx_in_length_errors);
8609 ESTAT_ADD(rx_out_length_errors);
8610 ESTAT_ADD(rx_64_or_less_octet_packets);
8611 ESTAT_ADD(rx_65_to_127_octet_packets);
8612 ESTAT_ADD(rx_128_to_255_octet_packets);
8613 ESTAT_ADD(rx_256_to_511_octet_packets);
8614 ESTAT_ADD(rx_512_to_1023_octet_packets);
8615 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8616 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8617 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8618 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8619 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8620
8621 ESTAT_ADD(tx_octets);
8622 ESTAT_ADD(tx_collisions);
8623 ESTAT_ADD(tx_xon_sent);
8624 ESTAT_ADD(tx_xoff_sent);
8625 ESTAT_ADD(tx_flow_control);
8626 ESTAT_ADD(tx_mac_errors);
8627 ESTAT_ADD(tx_single_collisions);
8628 ESTAT_ADD(tx_mult_collisions);
8629 ESTAT_ADD(tx_deferred);
8630 ESTAT_ADD(tx_excessive_collisions);
8631 ESTAT_ADD(tx_late_collisions);
8632 ESTAT_ADD(tx_collide_2times);
8633 ESTAT_ADD(tx_collide_3times);
8634 ESTAT_ADD(tx_collide_4times);
8635 ESTAT_ADD(tx_collide_5times);
8636 ESTAT_ADD(tx_collide_6times);
8637 ESTAT_ADD(tx_collide_7times);
8638 ESTAT_ADD(tx_collide_8times);
8639 ESTAT_ADD(tx_collide_9times);
8640 ESTAT_ADD(tx_collide_10times);
8641 ESTAT_ADD(tx_collide_11times);
8642 ESTAT_ADD(tx_collide_12times);
8643 ESTAT_ADD(tx_collide_13times);
8644 ESTAT_ADD(tx_collide_14times);
8645 ESTAT_ADD(tx_collide_15times);
8646 ESTAT_ADD(tx_ucast_packets);
8647 ESTAT_ADD(tx_mcast_packets);
8648 ESTAT_ADD(tx_bcast_packets);
8649 ESTAT_ADD(tx_carrier_sense_errors);
8650 ESTAT_ADD(tx_discards);
8651 ESTAT_ADD(tx_errors);
8652
8653 ESTAT_ADD(dma_writeq_full);
8654 ESTAT_ADD(dma_write_prioq_full);
8655 ESTAT_ADD(rxbds_empty);
8656 ESTAT_ADD(rx_discards);
8657 ESTAT_ADD(rx_errors);
8658 ESTAT_ADD(rx_threshold_hit);
8659
8660 ESTAT_ADD(dma_readq_full);
8661 ESTAT_ADD(dma_read_prioq_full);
8662 ESTAT_ADD(tx_comp_queue_full);
8663
8664 ESTAT_ADD(ring_set_send_prod_index);
8665 ESTAT_ADD(ring_status_update);
8666 ESTAT_ADD(nic_irqs);
8667 ESTAT_ADD(nic_avoided_irqs);
8668 ESTAT_ADD(nic_tx_threshold_hit);
8669
8670 return estats;
8671}
8672
8673static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8674{
8675 struct tg3 *tp = netdev_priv(dev);
8676 struct net_device_stats *stats = &tp->net_stats;
8677 struct net_device_stats *old_stats = &tp->net_stats_prev;
8678 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8679
8680 if (!hw_stats)
8681 return old_stats;
8682
8683 stats->rx_packets = old_stats->rx_packets +
8684 get_stat64(&hw_stats->rx_ucast_packets) +
8685 get_stat64(&hw_stats->rx_mcast_packets) +
8686 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008687
Linus Torvalds1da177e2005-04-16 15:20:36 -07008688 stats->tx_packets = old_stats->tx_packets +
8689 get_stat64(&hw_stats->tx_ucast_packets) +
8690 get_stat64(&hw_stats->tx_mcast_packets) +
8691 get_stat64(&hw_stats->tx_bcast_packets);
8692
8693 stats->rx_bytes = old_stats->rx_bytes +
8694 get_stat64(&hw_stats->rx_octets);
8695 stats->tx_bytes = old_stats->tx_bytes +
8696 get_stat64(&hw_stats->tx_octets);
8697
8698 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008699 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008700 stats->tx_errors = old_stats->tx_errors +
8701 get_stat64(&hw_stats->tx_errors) +
8702 get_stat64(&hw_stats->tx_mac_errors) +
8703 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8704 get_stat64(&hw_stats->tx_discards);
8705
8706 stats->multicast = old_stats->multicast +
8707 get_stat64(&hw_stats->rx_mcast_packets);
8708 stats->collisions = old_stats->collisions +
8709 get_stat64(&hw_stats->tx_collisions);
8710
8711 stats->rx_length_errors = old_stats->rx_length_errors +
8712 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8713 get_stat64(&hw_stats->rx_undersize_packets);
8714
8715 stats->rx_over_errors = old_stats->rx_over_errors +
8716 get_stat64(&hw_stats->rxbds_empty);
8717 stats->rx_frame_errors = old_stats->rx_frame_errors +
8718 get_stat64(&hw_stats->rx_align_errors);
8719 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8720 get_stat64(&hw_stats->tx_discards);
8721 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8722 get_stat64(&hw_stats->tx_carrier_sense_errors);
8723
8724 stats->rx_crc_errors = old_stats->rx_crc_errors +
8725 calc_crc_errors(tp);
8726
John W. Linville4f63b872005-09-12 14:43:18 -07008727 stats->rx_missed_errors = old_stats->rx_missed_errors +
8728 get_stat64(&hw_stats->rx_discards);
8729
Linus Torvalds1da177e2005-04-16 15:20:36 -07008730 return stats;
8731}
8732
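/*
 * Bit-reflected CRC-32 over the buffer, processed LSB first with the
 * standard Ethernet polynomial in its reversed form (0xedb88320).
 * The result feeds the multicast hash filter below.
 */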
8733static inline u32 calc_crc(unsigned char *buf, int len)
8734{
8735 u32 reg;
8736 u32 tmp;
8737 int j, k;
8738
8739 reg = 0xffffffff;
8740
8741 for (j = 0; j < len; j++) {
8742 reg ^= buf[j];
8743
8744 for (k = 0; k < 8; k++) {
8745 tmp = reg & 0x01;
8746
8747 reg >>= 1;
8748
8749 if (tmp) {
8750 reg ^= 0xedb88320;
8751 }
8752 }
8753 }
8754
8755 return ~reg;
8756}
8757
8758static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8759{
8760 /* accept or reject all multicast frames */
8761 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8762 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8763 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8764 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8765}
8766
8767static void __tg3_set_rx_mode(struct net_device *dev)
8768{
8769 struct tg3 *tp = netdev_priv(dev);
8770 u32 rx_mode;
8771
8772 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8773 RX_MODE_KEEP_VLAN_TAG);
8774
8775 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8776 * flag clear.
8777 */
8778#if TG3_VLAN_TAG_USED
8779 if (!tp->vlgrp &&
8780 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8781 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8782#else
8783 /* By definition, VLAN is disabled always in this
8784 * case.
8785 */
8786 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8787 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8788#endif
8789
8790 if (dev->flags & IFF_PROMISC) {
8791 /* Promiscuous mode. */
8792 rx_mode |= RX_MODE_PROMISC;
8793 } else if (dev->flags & IFF_ALLMULTI) {
8794 /* Accept all multicast. */
8795 tg3_set_multi (tp, 1);
8796 } else if (dev->mc_count < 1) {
8797 /* Reject all multicast. */
8798 tg3_set_multi (tp, 0);
8799 } else {
8800 /* Accept one or more multicast(s). */
8801 struct dev_mc_list *mclist;
8802 unsigned int i;
8803 u32 mc_filter[4] = { 0, };
8804 u32 regidx;
8805 u32 bit;
8806 u32 crc;
8807
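		/*
		 * Hash each multicast address into a 128-bit filter: the
		 * low 7 bits of the inverted CRC pick one of 128 bits,
		 * with bits 6:5 selecting one of the four MAC_HASH_REG
		 * registers and bits 4:0 the bit within that register.
		 */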
8808 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8809 i++, mclist = mclist->next) {
8810
8811 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8812 bit = ~crc & 0x7f;
8813 regidx = (bit & 0x60) >> 5;
8814 bit &= 0x1f;
8815 mc_filter[regidx] |= (1 << bit);
8816 }
8817
8818 tw32(MAC_HASH_REG_0, mc_filter[0]);
8819 tw32(MAC_HASH_REG_1, mc_filter[1]);
8820 tw32(MAC_HASH_REG_2, mc_filter[2]);
8821 tw32(MAC_HASH_REG_3, mc_filter[3]);
8822 }
8823
8824 if (rx_mode != tp->rx_mode) {
8825 tp->rx_mode = rx_mode;
8826 tw32_f(MAC_RX_MODE, rx_mode);
8827 udelay(10);
8828 }
8829}
8830
8831static void tg3_set_rx_mode(struct net_device *dev)
8832{
8833 struct tg3 *tp = netdev_priv(dev);
8834
Michael Chane75f7c92006-03-20 21:33:26 -08008835 if (!netif_running(dev))
8836 return;
8837
David S. Millerf47c11e2005-06-24 20:18:35 -07008838 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008839 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008840 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008841}
8842
8843#define TG3_REGDUMP_LEN (32 * 1024)
8844
8845static int tg3_get_regs_len(struct net_device *dev)
8846{
8847 return TG3_REGDUMP_LEN;
8848}
8849
8850static void tg3_get_regs(struct net_device *dev,
8851 struct ethtool_regs *regs, void *_p)
8852{
8853 u32 *p = _p;
8854 struct tg3 *tp = netdev_priv(dev);
8855 u8 *orig_p = _p;
8856 int i;
8857
8858 regs->version = 0;
8859
8860 memset(p, 0, TG3_REGDUMP_LEN);
8861
Michael Chanbc1c7562006-03-20 17:48:03 -08008862 if (tp->link_config.phy_is_low_power)
8863 return;
8864
David S. Millerf47c11e2005-06-24 20:18:35 -07008865 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008866
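/*
 * The ethtool register dump mirrors the register map: each
 * GET_REG32_LOOP/GET_REG32_1 repositions the output pointer to
 * orig_p + offset before copying, so ranges that are not read stay
 * zero-filled within the TG3_REGDUMP_LEN (32K) buffer.
 */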
8867#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8868#define GET_REG32_LOOP(base,len) \
8869do { p = (u32 *)(orig_p + (base)); \
8870 for (i = 0; i < len; i += 4) \
8871 __GET_REG32((base) + i); \
8872} while (0)
8873#define GET_REG32_1(reg) \
8874do { p = (u32 *)(orig_p + (reg)); \
8875 __GET_REG32((reg)); \
8876} while (0)
8877
8878 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8879 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8880 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8881 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8882 GET_REG32_1(SNDDATAC_MODE);
8883 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8884 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8885 GET_REG32_1(SNDBDC_MODE);
8886 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8887 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8888 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8889 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8890 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8891 GET_REG32_1(RCVDCC_MODE);
8892 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8893 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8894 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8895 GET_REG32_1(MBFREE_MODE);
8896 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8897 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8898 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8899 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8900 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008901 GET_REG32_1(RX_CPU_MODE);
8902 GET_REG32_1(RX_CPU_STATE);
8903 GET_REG32_1(RX_CPU_PGMCTR);
8904 GET_REG32_1(RX_CPU_HWBKPT);
8905 GET_REG32_1(TX_CPU_MODE);
8906 GET_REG32_1(TX_CPU_STATE);
8907 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008908 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8909 GET_REG32_LOOP(FTQ_RESET, 0x120);
8910 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8911 GET_REG32_1(DMAC_MODE);
8912 GET_REG32_LOOP(GRC_MODE, 0x4c);
8913 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8914 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8915
8916#undef __GET_REG32
8917#undef GET_REG32_LOOP
8918#undef GET_REG32_1
8919
David S. Millerf47c11e2005-06-24 20:18:35 -07008920 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008921}
8922
8923static int tg3_get_eeprom_len(struct net_device *dev)
8924{
8925 struct tg3 *tp = netdev_priv(dev);
8926
8927 return tp->nvram_size;
8928}
8929
8930static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008931static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008932static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008933
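/*
 * NVRAM is accessed in 32-bit words, so EEPROM reads that do not
 * start or end on a 4-byte boundary are handled by reading the
 * enclosing word and copying out only the requested bytes.
 */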
8934static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8935{
8936 struct tg3 *tp = netdev_priv(dev);
8937 int ret;
8938 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008939 u32 i, offset, len, b_offset, b_count;
8940 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008941
Michael Chanbc1c7562006-03-20 17:48:03 -08008942 if (tp->link_config.phy_is_low_power)
8943 return -EAGAIN;
8944
Linus Torvalds1da177e2005-04-16 15:20:36 -07008945 offset = eeprom->offset;
8946 len = eeprom->len;
8947 eeprom->len = 0;
8948
8949 eeprom->magic = TG3_EEPROM_MAGIC;
8950
8951 if (offset & 3) {
8952 /* adjustments to start on required 4 byte boundary */
8953 b_offset = offset & 3;
8954 b_count = 4 - b_offset;
8955 if (b_count > len) {
8956 /* i.e. offset=1 len=2 */
8957 b_count = len;
8958 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008959 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008960 if (ret)
8961 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008962 memcpy(data, ((char*)&val) + b_offset, b_count);
8963 len -= b_count;
8964 offset += b_count;
8965 eeprom->len += b_count;
8966 }
8967
8968	/* read bytes up to the last 4 byte boundary */
8969 pd = &data[eeprom->len];
8970 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008971 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008972 if (ret) {
8973 eeprom->len += i;
8974 return ret;
8975 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008976 memcpy(pd + i, &val, 4);
8977 }
8978 eeprom->len += i;
8979
8980 if (len & 3) {
8981 /* read last bytes not ending on 4 byte boundary */
8982 pd = &data[eeprom->len];
8983 b_count = len & 3;
8984 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008985 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008986 if (ret)
8987 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008988 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008989 eeprom->len += b_count;
8990 }
8991 return 0;
8992}
8993
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008994static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008995
8996static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8997{
8998 struct tg3 *tp = netdev_priv(dev);
8999 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08009000 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009001 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08009002 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009003
Michael Chanbc1c7562006-03-20 17:48:03 -08009004 if (tp->link_config.phy_is_low_power)
9005 return -EAGAIN;
9006
Linus Torvalds1da177e2005-04-16 15:20:36 -07009007 if (eeprom->magic != TG3_EEPROM_MAGIC)
9008 return -EINVAL;
9009
9010 offset = eeprom->offset;
9011 len = eeprom->len;
9012
9013 if ((b_offset = (offset & 3))) {
9014 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08009015 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009016 if (ret)
9017 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009018 len += b_offset;
9019 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07009020 if (len < 4)
9021 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009022 }
9023
9024 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07009025 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009026 /* adjustments to end on required 4 byte boundary */
9027 odd_len = 1;
9028 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08009029 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009030 if (ret)
9031 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009032 }
9033
9034 buf = data;
9035 if (b_offset || odd_len) {
9036 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009037 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009038 return -ENOMEM;
9039 if (b_offset)
9040 memcpy(buf, &start, 4);
9041 if (odd_len)
9042 memcpy(buf+len-4, &end, 4);
9043 memcpy(buf + b_offset, data, eeprom->len);
9044 }
9045
9046 ret = tg3_nvram_write_block(tp, offset, len, buf);
9047
9048 if (buf != data)
9049 kfree(buf);
9050
9051 return ret;
9052}
9053
9054static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9055{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009056 struct tg3 *tp = netdev_priv(dev);
9057
9058 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9059 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9060 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009061 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009062 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009063
Linus Torvalds1da177e2005-04-16 15:20:36 -07009064 cmd->supported = (SUPPORTED_Autoneg);
9065
9066 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9067 cmd->supported |= (SUPPORTED_1000baseT_Half |
9068 SUPPORTED_1000baseT_Full);
9069
Karsten Keilef348142006-05-12 12:49:08 -07009070 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009071 cmd->supported |= (SUPPORTED_100baseT_Half |
9072 SUPPORTED_100baseT_Full |
9073 SUPPORTED_10baseT_Half |
9074 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08009075 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07009076 cmd->port = PORT_TP;
9077 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009078 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07009079 cmd->port = PORT_FIBRE;
9080 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009081
Linus Torvalds1da177e2005-04-16 15:20:36 -07009082 cmd->advertising = tp->link_config.advertising;
9083 if (netif_running(dev)) {
9084 cmd->speed = tp->link_config.active_speed;
9085 cmd->duplex = tp->link_config.active_duplex;
9086 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009087 cmd->phy_address = PHY_ADDR;
9088 cmd->transceiver = 0;
9089 cmd->autoneg = tp->link_config.autoneg;
9090 cmd->maxtxpkt = 0;
9091 cmd->maxrxpkt = 0;
9092 return 0;
9093}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009094
Linus Torvalds1da177e2005-04-16 15:20:36 -07009095static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9096{
9097 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009098
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009099 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9100 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9101 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009102 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009103 }
9104
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009105 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009106 /* These are the only valid advertisement bits allowed. */
9107 if (cmd->autoneg == AUTONEG_ENABLE &&
9108 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9109 ADVERTISED_1000baseT_Full |
9110 ADVERTISED_Autoneg |
9111 ADVERTISED_FIBRE)))
9112 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009113 /* Fiber can only do SPEED_1000. */
9114 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9115 (cmd->speed != SPEED_1000))
9116 return -EINVAL;
9117 /* Copper cannot force SPEED_1000. */
9118 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9119 (cmd->speed == SPEED_1000))
9120 return -EINVAL;
9121 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009122 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009123 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009124
David S. Millerf47c11e2005-06-24 20:18:35 -07009125 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009126
9127 tp->link_config.autoneg = cmd->autoneg;
9128 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009129 tp->link_config.advertising = (cmd->advertising |
9130 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009131 tp->link_config.speed = SPEED_INVALID;
9132 tp->link_config.duplex = DUPLEX_INVALID;
9133 } else {
9134 tp->link_config.advertising = 0;
9135 tp->link_config.speed = cmd->speed;
9136 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009137 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009138
Michael Chan24fcad62006-12-17 17:06:46 -08009139 tp->link_config.orig_speed = tp->link_config.speed;
9140 tp->link_config.orig_duplex = tp->link_config.duplex;
9141 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9142
Linus Torvalds1da177e2005-04-16 15:20:36 -07009143 if (netif_running(dev))
9144 tg3_setup_phy(tp, 1);
9145
David S. Millerf47c11e2005-06-24 20:18:35 -07009146 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009147
Linus Torvalds1da177e2005-04-16 15:20:36 -07009148 return 0;
9149}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009150
Linus Torvalds1da177e2005-04-16 15:20:36 -07009151static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9152{
9153 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009154
Linus Torvalds1da177e2005-04-16 15:20:36 -07009155 strcpy(info->driver, DRV_MODULE_NAME);
9156 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009157 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009158 strcpy(info->bus_info, pci_name(tp->pdev));
9159}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009160
Linus Torvalds1da177e2005-04-16 15:20:36 -07009161static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9162{
9163 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009164
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009165 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9166 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009167 wol->supported = WAKE_MAGIC;
9168 else
9169 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009170 wol->wolopts = 0;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08009171 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9172 device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009173 wol->wolopts = WAKE_MAGIC;
9174 memset(&wol->sopass, 0, sizeof(wol->sopass));
9175}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009176
Linus Torvalds1da177e2005-04-16 15:20:36 -07009177static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9178{
9179 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009180 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009181
Linus Torvalds1da177e2005-04-16 15:20:36 -07009182 if (wol->wolopts & ~WAKE_MAGIC)
9183 return -EINVAL;
9184 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009185 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009186 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009187
David S. Millerf47c11e2005-06-24 20:18:35 -07009188 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009189 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009190 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009191 device_set_wakeup_enable(dp, true);
9192 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009193 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009194 device_set_wakeup_enable(dp, false);
9195 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009196 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009197
Linus Torvalds1da177e2005-04-16 15:20:36 -07009198 return 0;
9199}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009200
Linus Torvalds1da177e2005-04-16 15:20:36 -07009201static u32 tg3_get_msglevel(struct net_device *dev)
9202{
9203 struct tg3 *tp = netdev_priv(dev);
9204 return tp->msg_enable;
9205}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009206
Linus Torvalds1da177e2005-04-16 15:20:36 -07009207static void tg3_set_msglevel(struct net_device *dev, u32 value)
9208{
9209 struct tg3 *tp = netdev_priv(dev);
9210 tp->msg_enable = value;
9211}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009212
Linus Torvalds1da177e2005-04-16 15:20:36 -07009213static int tg3_set_tso(struct net_device *dev, u32 value)
9214{
9215 struct tg3 *tp = netdev_priv(dev);
9216
9217 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9218 if (value)
9219 return -EINVAL;
9220 return 0;
9221 }
Michael Chanb5d37722006-09-27 16:06:21 -07009222 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9223 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009224 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009225 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9227 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9228 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009230 dev->features |= NETIF_F_TSO_ECN;
9231 } else
9232 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009233 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009234 return ethtool_op_set_tso(dev, value);
9235}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009236
Linus Torvalds1da177e2005-04-16 15:20:36 -07009237static int tg3_nway_reset(struct net_device *dev)
9238{
9239 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009240 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009241
Linus Torvalds1da177e2005-04-16 15:20:36 -07009242 if (!netif_running(dev))
9243 return -EAGAIN;
9244
Michael Chanc94e3942005-09-27 12:12:42 -07009245 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9246 return -EINVAL;
9247
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009248 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9249 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9250 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009251 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009252 } else {
9253 u32 bmcr;
9254
9255 spin_lock_bh(&tp->lock);
9256 r = -EINVAL;
9257 tg3_readphy(tp, MII_BMCR, &bmcr);
9258 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9259 ((bmcr & BMCR_ANENABLE) ||
9260 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9261 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9262 BMCR_ANENABLE);
9263 r = 0;
9264 }
9265 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009266 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009267
Linus Torvalds1da177e2005-04-16 15:20:36 -07009268 return r;
9269}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009270
Linus Torvalds1da177e2005-04-16 15:20:36 -07009271static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9272{
9273 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009274
Linus Torvalds1da177e2005-04-16 15:20:36 -07009275 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9276 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009277 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9278 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9279 else
9280 ering->rx_jumbo_max_pending = 0;
9281
9282 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009283
9284 ering->rx_pending = tp->rx_pending;
9285 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009286 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9287 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9288 else
9289 ering->rx_jumbo_pending = 0;
9290
Linus Torvalds1da177e2005-04-16 15:20:36 -07009291 ering->tx_pending = tp->tx_pending;
9292}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009293
Linus Torvalds1da177e2005-04-16 15:20:36 -07009294static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9295{
9296 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009297 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009298
Linus Torvalds1da177e2005-04-16 15:20:36 -07009299 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9300 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009301 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9302 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009303 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009304 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009305 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009306
Michael Chanbbe832c2005-06-24 20:20:04 -07009307 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009308 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009309 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009310 irq_sync = 1;
9311 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009312
Michael Chanbbe832c2005-06-24 20:20:04 -07009313 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009314
Linus Torvalds1da177e2005-04-16 15:20:36 -07009315 tp->rx_pending = ering->rx_pending;
9316
9317 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9318 tp->rx_pending > 63)
9319 tp->rx_pending = 63;
9320 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9321 tp->tx_pending = ering->tx_pending;
9322
9323 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009324 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009325 err = tg3_restart_hw(tp, 1);
9326 if (!err)
9327 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009328 }
9329
David S. Millerf47c11e2005-06-24 20:18:35 -07009330 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009331
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009332 if (irq_sync && !err)
9333 tg3_phy_start(tp);
9334
Michael Chanb9ec6c12006-07-25 16:37:27 -07009335 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009336}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009337
Linus Torvalds1da177e2005-04-16 15:20:36 -07009338static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9339{
9340 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009341
Linus Torvalds1da177e2005-04-16 15:20:36 -07009342 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009343
9344 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9345 epause->rx_pause = 1;
9346 else
9347 epause->rx_pause = 0;
9348
9349 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9350 epause->tx_pause = 1;
9351 else
9352 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009353}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009354
Linus Torvalds1da177e2005-04-16 15:20:36 -07009355static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9356{
9357 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009358 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009359
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009360 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9361 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9362 return -EAGAIN;
9363
9364 if (epause->autoneg) {
9365 u32 newadv;
9366 struct phy_device *phydev;
9367
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009368 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009369
9370 if (epause->rx_pause) {
9371 if (epause->tx_pause)
9372 newadv = ADVERTISED_Pause;
9373 else
9374 newadv = ADVERTISED_Pause |
9375 ADVERTISED_Asym_Pause;
9376 } else if (epause->tx_pause) {
9377 newadv = ADVERTISED_Asym_Pause;
9378 } else
9379 newadv = 0;
9380
9381 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9382 u32 oldadv = phydev->advertising &
9383 (ADVERTISED_Pause |
9384 ADVERTISED_Asym_Pause);
9385 if (oldadv != newadv) {
9386 phydev->advertising &=
9387 ~(ADVERTISED_Pause |
9388 ADVERTISED_Asym_Pause);
9389 phydev->advertising |= newadv;
9390 err = phy_start_aneg(phydev);
9391 }
9392 } else {
9393 tp->link_config.advertising &=
9394 ~(ADVERTISED_Pause |
9395 ADVERTISED_Asym_Pause);
9396 tp->link_config.advertising |= newadv;
9397 }
9398 } else {
9399 if (epause->rx_pause)
9400 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9401 else
9402 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9403
9404 if (epause->tx_pause)
9405 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9406 else
9407 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9408
9409 if (netif_running(dev))
9410 tg3_setup_flow_control(tp, 0, 0);
9411 }
9412 } else {
9413 int irq_sync = 0;
9414
9415 if (netif_running(dev)) {
9416 tg3_netif_stop(tp);
9417 irq_sync = 1;
9418 }
9419
9420 tg3_full_lock(tp, irq_sync);
9421
9422 if (epause->autoneg)
9423 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9424 else
9425 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9426 if (epause->rx_pause)
9427 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9428 else
9429 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9430 if (epause->tx_pause)
9431 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9432 else
9433 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9434
9435 if (netif_running(dev)) {
9436 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9437 err = tg3_restart_hw(tp, 1);
9438 if (!err)
9439 tg3_netif_start(tp);
9440 }
9441
9442 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009443 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009444
Michael Chanb9ec6c12006-07-25 16:37:27 -07009445 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009446}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009447
Linus Torvalds1da177e2005-04-16 15:20:36 -07009448static u32 tg3_get_rx_csum(struct net_device *dev)
9449{
9450 struct tg3 *tp = netdev_priv(dev);
9451 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9452}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009453
Linus Torvalds1da177e2005-04-16 15:20:36 -07009454static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9455{
9456 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009457
Linus Torvalds1da177e2005-04-16 15:20:36 -07009458 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9459 if (data != 0)
9460 return -EINVAL;
9461 return 0;
9462 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009463
David S. Millerf47c11e2005-06-24 20:18:35 -07009464 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009465 if (data)
9466 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9467 else
9468 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009469 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009470
Linus Torvalds1da177e2005-04-16 15:20:36 -07009471 return 0;
9472}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009473
Linus Torvalds1da177e2005-04-16 15:20:36 -07009474static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9475{
9476 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009477
Linus Torvalds1da177e2005-04-16 15:20:36 -07009478 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9479 if (data != 0)
9480 return -EINVAL;
9481 return 0;
9482 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009483
Michael Chanaf36e6b2006-03-23 01:28:06 -08009484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009486 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009489 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009490 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009491 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009492
9493 return 0;
9494}
9495
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009496static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009498 switch (sset) {
9499 case ETH_SS_TEST:
9500 return TG3_NUM_TEST;
9501 case ETH_SS_STATS:
9502 return TG3_NUM_STATS;
9503 default:
9504 return -EOPNOTSUPP;
9505 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009506}
9507
Linus Torvalds1da177e2005-04-16 15:20:36 -07009508static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9509{
9510 switch (stringset) {
9511 case ETH_SS_STATS:
9512 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9513 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009514 case ETH_SS_TEST:
9515 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9516 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009517 default:
9518 WARN_ON(1); /* we need a WARN() */
9519 break;
9520 }
9521}
9522
Michael Chan4009a932005-09-05 17:52:54 -07009523static int tg3_phys_id(struct net_device *dev, u32 data)
9524{
9525 struct tg3 *tp = netdev_priv(dev);
9526 int i;
9527
9528 if (!netif_running(tp->dev))
9529 return -EAGAIN;
9530
9531 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009532 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009533
9534 for (i = 0; i < (data * 2); i++) {
9535 if ((i % 2) == 0)
9536 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9537 LED_CTRL_1000MBPS_ON |
9538 LED_CTRL_100MBPS_ON |
9539 LED_CTRL_10MBPS_ON |
9540 LED_CTRL_TRAFFIC_OVERRIDE |
9541 LED_CTRL_TRAFFIC_BLINK |
9542 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009543
Michael Chan4009a932005-09-05 17:52:54 -07009544 else
9545 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9546 LED_CTRL_TRAFFIC_OVERRIDE);
9547
9548 if (msleep_interruptible(500))
9549 break;
9550 }
9551 tw32(MAC_LED_CTRL, tp->led_ctrl);
9552 return 0;
9553}
9554
Linus Torvalds1da177e2005-04-16 15:20:36 -07009555static void tg3_get_ethtool_stats (struct net_device *dev,
9556 struct ethtool_stats *estats, u64 *tmp_stats)
9557{
9558 struct tg3 *tp = netdev_priv(dev);
9559 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9560}
9561
Michael Chan566f86a2005-05-29 14:56:58 -07009562#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009563#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9564#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9565#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009566#define NVRAM_SELFBOOT_HW_SIZE 0x20
9567#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009568
9569static int tg3_test_nvram(struct tg3 *tp)
9570{
Al Virob9fc7dc2007-12-17 22:59:57 -08009571 u32 csum, magic;
9572 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009573 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009574
Michael Chan18201802006-03-20 22:29:15 -08009575 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009576 return -EIO;
9577
Michael Chan1b277772006-03-20 22:27:48 -08009578 if (magic == TG3_EEPROM_MAGIC)
9579 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009580 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009581 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9582 TG3_EEPROM_SB_FORMAT_1) {
9583 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9584 case TG3_EEPROM_SB_REVISION_0:
9585 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9586 break;
9587 case TG3_EEPROM_SB_REVISION_2:
9588 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9589 break;
9590 case TG3_EEPROM_SB_REVISION_3:
9591 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9592 break;
9593 default:
9594 return 0;
9595 }
9596 } else
Michael Chan1b277772006-03-20 22:27:48 -08009597 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009598 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9599 size = NVRAM_SELFBOOT_HW_SIZE;
9600 else
Michael Chan1b277772006-03-20 22:27:48 -08009601 return -EIO;
9602
9603 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009604 if (buf == NULL)
9605 return -ENOMEM;
9606
Michael Chan1b277772006-03-20 22:27:48 -08009607 err = -EIO;
9608 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009609 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009610 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009611 }
Michael Chan1b277772006-03-20 22:27:48 -08009612 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009613 goto out;
9614
Michael Chan1b277772006-03-20 22:27:48 -08009615 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009616 magic = swab32(le32_to_cpu(buf[0]));
9617 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009618 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009619 u8 *buf8 = (u8 *) buf, csum8 = 0;
9620
Al Virob9fc7dc2007-12-17 22:59:57 -08009621 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009622 TG3_EEPROM_SB_REVISION_2) {
9623 /* For rev 2, the csum doesn't include the MBA. */
9624 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9625 csum8 += buf8[i];
9626 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9627 csum8 += buf8[i];
9628 } else {
9629 for (i = 0; i < size; i++)
9630 csum8 += buf8[i];
9631 }
Michael Chan1b277772006-03-20 22:27:48 -08009632
Adrian Bunkad96b482006-04-05 22:21:04 -07009633 if (csum8 == 0) {
9634 err = 0;
9635 goto out;
9636 }
9637
9638 err = -EIO;
9639 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009640 }
Michael Chan566f86a2005-05-29 14:56:58 -07009641
Al Virob9fc7dc2007-12-17 22:59:57 -08009642 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009643 TG3_EEPROM_MAGIC_HW) {
9644 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9645 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9646 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009647
9648 /* Separate the parity bits and the data bytes. */
9649 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9650 if ((i == 0) || (i == 8)) {
9651 int l;
9652 u8 msk;
9653
9654 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9655 parity[k++] = buf8[i] & msk;
9656 i++;
9657 }
9658 else if (i == 16) {
9659 int l;
9660 u8 msk;
9661
9662 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9663 parity[k++] = buf8[i] & msk;
9664 i++;
9665
9666 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9667 parity[k++] = buf8[i] & msk;
9668 i++;
9669 }
9670 data[j++] = buf8[i];
9671 }
9672
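	/*
	 * The check below enforces odd parity across each data byte plus
	 * its parity bit: bytes with an even number of set bits must have
	 * the parity bit set, bytes with an odd count must have it clear,
	 * otherwise the self-boot image is rejected.
	 */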
9673 err = -EIO;
9674 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9675 u8 hw8 = hweight8(data[i]);
9676
9677 if ((hw8 & 0x1) && parity[i])
9678 goto out;
9679 else if (!(hw8 & 0x1) && !parity[i])
9680 goto out;
9681 }
9682 err = 0;
9683 goto out;
9684 }
9685
Michael Chan566f86a2005-05-29 14:56:58 -07009686 /* Bootstrap checksum at offset 0x10 */
9687 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009688	if (csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009689 goto out;
9690
9691 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9692 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009693 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009694 goto out;
9695
9696 err = 0;
9697
9698out:
9699 kfree(buf);
9700 return err;
9701}
9702
Michael Chanca430072005-05-29 14:57:23 -07009703#define TG3_SERDES_TIMEOUT_SEC 2
9704#define TG3_COPPER_TIMEOUT_SEC 6
9705
9706static int tg3_test_link(struct tg3 *tp)
9707{
9708 int i, max;
9709
9710 if (!netif_running(tp->dev))
9711 return -ENODEV;
9712
Michael Chan4c987482005-09-05 17:52:38 -07009713 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009714 max = TG3_SERDES_TIMEOUT_SEC;
9715 else
9716 max = TG3_COPPER_TIMEOUT_SEC;
9717
9718 for (i = 0; i < max; i++) {
9719 if (netif_carrier_ok(tp->dev))
9720 return 0;
9721
9722 if (msleep_interruptible(1000))
9723 break;
9724 }
9725
9726 return -EIO;
9727}
9728
Michael Chana71116d2005-05-29 14:58:11 -07009729/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009730static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009731{
Michael Chanb16250e2006-09-27 16:10:14 -07009732 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009733 u32 offset, read_mask, write_mask, val, save_val, read_val;
9734 static struct {
9735 u16 offset;
9736 u16 flags;
9737#define TG3_FL_5705 0x1
9738#define TG3_FL_NOT_5705 0x2
9739#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009740#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009741 u32 read_mask;
9742 u32 write_mask;
9743 } reg_tbl[] = {
9744 /* MAC Control Registers */
9745 { MAC_MODE, TG3_FL_NOT_5705,
9746 0x00000000, 0x00ef6f8c },
9747 { MAC_MODE, TG3_FL_5705,
9748 0x00000000, 0x01ef6b8c },
9749 { MAC_STATUS, TG3_FL_NOT_5705,
9750 0x03800107, 0x00000000 },
9751 { MAC_STATUS, TG3_FL_5705,
9752 0x03800100, 0x00000000 },
9753 { MAC_ADDR_0_HIGH, 0x0000,
9754 0x00000000, 0x0000ffff },
9755 { MAC_ADDR_0_LOW, 0x0000,
9756 0x00000000, 0xffffffff },
9757 { MAC_RX_MTU_SIZE, 0x0000,
9758 0x00000000, 0x0000ffff },
9759 { MAC_TX_MODE, 0x0000,
9760 0x00000000, 0x00000070 },
9761 { MAC_TX_LENGTHS, 0x0000,
9762 0x00000000, 0x00003fff },
9763 { MAC_RX_MODE, TG3_FL_NOT_5705,
9764 0x00000000, 0x000007fc },
9765 { MAC_RX_MODE, TG3_FL_5705,
9766 0x00000000, 0x000007dc },
9767 { MAC_HASH_REG_0, 0x0000,
9768 0x00000000, 0xffffffff },
9769 { MAC_HASH_REG_1, 0x0000,
9770 0x00000000, 0xffffffff },
9771 { MAC_HASH_REG_2, 0x0000,
9772 0x00000000, 0xffffffff },
9773 { MAC_HASH_REG_3, 0x0000,
9774 0x00000000, 0xffffffff },
9775
9776 /* Receive Data and Receive BD Initiator Control Registers. */
9777 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9778 0x00000000, 0xffffffff },
9779 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9780 0x00000000, 0xffffffff },
9781 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9782 0x00000000, 0x00000003 },
9783 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9784 0x00000000, 0xffffffff },
9785 { RCVDBDI_STD_BD+0, 0x0000,
9786 0x00000000, 0xffffffff },
9787 { RCVDBDI_STD_BD+4, 0x0000,
9788 0x00000000, 0xffffffff },
9789 { RCVDBDI_STD_BD+8, 0x0000,
9790 0x00000000, 0xffff0002 },
9791 { RCVDBDI_STD_BD+0xc, 0x0000,
9792 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009793
Michael Chana71116d2005-05-29 14:58:11 -07009794 /* Receive BD Initiator Control Registers. */
9795 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9796 0x00000000, 0xffffffff },
9797 { RCVBDI_STD_THRESH, TG3_FL_5705,
9798 0x00000000, 0x000003ff },
9799 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9800 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009801
Michael Chana71116d2005-05-29 14:58:11 -07009802 /* Host Coalescing Control Registers. */
9803 { HOSTCC_MODE, TG3_FL_NOT_5705,
9804 0x00000000, 0x00000004 },
9805 { HOSTCC_MODE, TG3_FL_5705,
9806 0x00000000, 0x000000f6 },
9807 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9808 0x00000000, 0xffffffff },
9809 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9810 0x00000000, 0x000003ff },
9811 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9812 0x00000000, 0xffffffff },
9813 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9814 0x00000000, 0x000003ff },
9815 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9816 0x00000000, 0xffffffff },
9817 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9818 0x00000000, 0x000000ff },
9819 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9820 0x00000000, 0xffffffff },
9821 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9822 0x00000000, 0x000000ff },
9823 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9824 0x00000000, 0xffffffff },
9825 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9826 0x00000000, 0xffffffff },
9827 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9828 0x00000000, 0xffffffff },
9829 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9830 0x00000000, 0x000000ff },
9831 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9832 0x00000000, 0xffffffff },
9833 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9834 0x00000000, 0x000000ff },
9835 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9836 0x00000000, 0xffffffff },
9837 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9838 0x00000000, 0xffffffff },
9839 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9840 0x00000000, 0xffffffff },
9841 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9842 0x00000000, 0xffffffff },
9843 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9844 0x00000000, 0xffffffff },
9845 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9846 0xffffffff, 0x00000000 },
9847 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9848 0xffffffff, 0x00000000 },
9849
9850 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009851 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009852 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009853 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009854 0x00000000, 0x007fffff },
9855 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9856 0x00000000, 0x0000003f },
9857 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9858 0x00000000, 0x000001ff },
9859 { BUFMGR_MB_HIGH_WATER, 0x0000,
9860 0x00000000, 0x000001ff },
9861 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9862 0xffffffff, 0x00000000 },
9863 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9864 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009865
Michael Chana71116d2005-05-29 14:58:11 -07009866 /* Mailbox Registers */
9867 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9868 0x00000000, 0x000001ff },
9869 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9870 0x00000000, 0x000001ff },
9871 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9872 0x00000000, 0x000007ff },
9873 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9874 0x00000000, 0x000001ff },
9875
9876 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9877 };
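	/* Worked example (for illustration) of how the loop below exercises
	 * one reg_tbl entry, using { MAC_ADDR_0_HIGH, 0x0000,
	 * 0x00000000, 0x0000ffff } from the table above:
	 *
	 *   read_mask  0x00000000 - no bits are expected to be read-only
	 *   write_mask 0x0000ffff - the low 16 bits must be writable
	 *
	 *   write 0x00000000  -> readback must have the low 16 bits clear
	 *   write 0x0000ffff  -> readback must have the low 16 bits set
	 *
	 * Any other readback indicates a stuck register bit and fails the test.
	 */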
9878
Michael Chanb16250e2006-09-27 16:10:14 -07009879 is_5705 = is_5750 = 0;
9880 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009881 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009882 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9883 is_5750 = 1;
9884 }
Michael Chana71116d2005-05-29 14:58:11 -07009885
9886 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9887 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9888 continue;
9889
9890 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9891 continue;
9892
9893 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9894 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9895 continue;
9896
Michael Chanb16250e2006-09-27 16:10:14 -07009897 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9898 continue;
9899
Michael Chana71116d2005-05-29 14:58:11 -07009900 offset = (u32) reg_tbl[i].offset;
9901 read_mask = reg_tbl[i].read_mask;
9902 write_mask = reg_tbl[i].write_mask;
9903
9904 /* Save the original register content */
9905 save_val = tr32(offset);
9906
9907 /* Determine the read-only value. */
9908 read_val = save_val & read_mask;
9909
9910 /* Write zero to the register, then make sure the read-only bits
9911 * are not changed and the read/write bits are all zeros.
9912 */
9913 tw32(offset, 0);
9914
9915 val = tr32(offset);
9916
9917 /* Test the read-only and read/write bits. */
9918 if (((val & read_mask) != read_val) || (val & write_mask))
9919 goto out;
9920
9921 /* Write ones to all the bits defined by RdMask and WrMask, then
9922 * make sure the read-only bits are not changed and the
9923 * read/write bits are all ones.
9924 */
9925 tw32(offset, read_mask | write_mask);
9926
9927 val = tr32(offset);
9928
9929 /* Test the read-only bits. */
9930 if ((val & read_mask) != read_val)
9931 goto out;
9932
9933 /* Test the read/write bits. */
9934 if ((val & write_mask) != write_mask)
9935 goto out;
9936
9937 tw32(offset, save_val);
9938 }
9939
9940 return 0;
9941
9942out:
Michael Chan9f88f292006-12-07 00:22:54 -08009943 if (netif_msg_hw(tp))
9944 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9945 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009946 tw32(offset, save_val);
9947 return -EIO;
9948}
9949
Michael Chan7942e1d2005-05-29 14:58:36 -07009950static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9951{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009952 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009953 int i;
9954 u32 j;
9955
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009956 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009957 for (j = 0; j < len; j += 4) {
9958 u32 val;
9959
9960 tg3_write_mem(tp, offset + j, test_pattern[i]);
9961 tg3_read_mem(tp, offset + j, &val);
9962 if (val != test_pattern[i])
9963 return -EIO;
9964 }
9965 }
9966 return 0;
9967}
9968
9969static int tg3_test_memory(struct tg3 *tp)
9970{
9971 static struct mem_entry {
9972 u32 offset;
9973 u32 len;
9974 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009975 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009976 { 0x00002000, 0x1c000},
9977 { 0xffffffff, 0x00000}
9978 }, mem_tbl_5705[] = {
9979 { 0x00000100, 0x0000c},
9980 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009981 { 0x00004000, 0x00800},
9982 { 0x00006000, 0x01000},
9983 { 0x00008000, 0x02000},
9984 { 0x00010000, 0x0e000},
9985 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009986 }, mem_tbl_5755[] = {
9987 { 0x00000200, 0x00008},
9988 { 0x00004000, 0x00800},
9989 { 0x00006000, 0x00800},
9990 { 0x00008000, 0x02000},
9991 { 0x00010000, 0x0c000},
9992 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009993 }, mem_tbl_5906[] = {
9994 { 0x00000200, 0x00008},
9995 { 0x00004000, 0x00400},
9996 { 0x00006000, 0x00400},
9997 { 0x00008000, 0x01000},
9998 { 0x00010000, 0x01000},
9999 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -070010000 };
10001 struct mem_entry *mem_tbl;
10002 int err = 0;
10003 int i;
10004
Michael Chan79f4d132006-03-20 22:28:57 -080010005 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080010006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070010007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070010008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -080010011 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -070010012 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10013 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -080010014 else
10015 mem_tbl = mem_tbl_5705;
10016 } else
Michael Chan7942e1d2005-05-29 14:58:36 -070010017 mem_tbl = mem_tbl_570x;
10018
10019 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10020 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10021 mem_tbl[i].len)) != 0)
10022 break;
10023 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010024
Michael Chan7942e1d2005-05-29 14:58:36 -070010025 return err;
10026}
10027
Michael Chan9f40dea2005-09-05 17:53:06 -070010028#define TG3_MAC_LOOPBACK 0
10029#define TG3_PHY_LOOPBACK 1
10030
10031static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -070010032{
Michael Chan9f40dea2005-09-05 17:53:06 -070010033 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -070010034 u32 desc_idx;
10035 struct sk_buff *skb, *rx_skb;
10036 u8 *tx_data;
10037 dma_addr_t map;
10038 int num_pkts, tx_len, rx_len, i, err;
10039 struct tg3_rx_buffer_desc *desc;
10040
Michael Chan9f40dea2005-09-05 17:53:06 -070010041 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -070010042		/* HW erratum - MAC loopback fails in some cases on 5780.
10043		 * Normal traffic and PHY loopback are not affected by
10044		 * this erratum.
10045 */
10046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10047 return 0;
10048
Michael Chan9f40dea2005-09-05 17:53:06 -070010049 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010050 MAC_MODE_PORT_INT_LPBACK;
10051 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10052 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -070010053 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10054 mac_mode |= MAC_MODE_PORT_MODE_MII;
10055 else
10056 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -070010057 tw32(MAC_MODE, mac_mode);
10058 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -070010059 u32 val;
10060
Michael Chanb16250e2006-09-27 16:10:14 -070010061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10062 u32 phytest;
10063
10064 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10065 u32 phy;
10066
10067 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10068 phytest | MII_TG3_EPHY_SHADOW_EN);
10069 if (!tg3_readphy(tp, 0x1b, &phy))
10070 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -070010071 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10072 }
Michael Chan5d64ad32006-12-07 00:19:40 -080010073 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10074 } else
10075 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -070010076
Matt Carlson9ef8ca92007-07-11 19:48:29 -070010077 tg3_phy_toggle_automdix(tp, 0);
10078
Michael Chan3f7045c2006-09-27 16:02:29 -070010079 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -070010080 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -080010081
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010082 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -080010083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010084 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010085 mac_mode |= MAC_MODE_PORT_MODE_MII;
10086 } else
10087 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010088
Michael Chanc94e3942005-09-27 12:12:42 -070010089 /* reset to prevent losing 1st rx packet intermittently */
10090 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10091 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10092 udelay(10);
10093 tw32_f(MAC_RX_MODE, tp->rx_mode);
10094 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10096 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10097 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10098 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10099 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010100 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10101 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10102 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010103 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010104 }
10105 else
10106 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010107
10108 err = -EIO;
10109
Michael Chanc76949a2005-05-29 14:58:59 -070010110 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010111 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010112 if (!skb)
10113 return -ENOMEM;
10114
Michael Chanc76949a2005-05-29 14:58:59 -070010115 tx_data = skb_put(skb, tx_len);
10116 memcpy(tx_data, tp->dev->dev_addr, 6);
10117 memset(tx_data + 6, 0x0, 8);
10118
10119 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10120
10121 for (i = 14; i < tx_len; i++)
10122 tx_data[i] = (u8) (i & 0xff);
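	/* The generated loopback frame therefore looks like this (derived
	 * from the code above, noted here for reference):
	 *
	 *	bytes  0..5	destination MAC == our own dev_addr
	 *	bytes  6..13	zeroed (source MAC bytes plus type/len field)
	 *	bytes 14..1513	incrementing pattern, i & 0xff
	 *
	 * The same pattern is verified byte-for-byte on the receive side
	 * further down.
	 */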
10123
10124 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10125
10126 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10127 HOSTCC_MODE_NOW);
10128
10129 udelay(10);
10130
10131 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10132
Michael Chanc76949a2005-05-29 14:58:59 -070010133 num_pkts = 0;
10134
Michael Chan9f40dea2005-09-05 17:53:06 -070010135 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010136
Michael Chan9f40dea2005-09-05 17:53:06 -070010137 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010138 num_pkts++;
10139
Michael Chan9f40dea2005-09-05 17:53:06 -070010140 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10141 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010142 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010143
10144 udelay(10);
10145
Michael Chan3f7045c2006-09-27 16:02:29 -070010146 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10147 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010148 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10149 HOSTCC_MODE_NOW);
10150
10151 udelay(10);
10152
10153 tx_idx = tp->hw_status->idx[0].tx_consumer;
10154 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010155 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010156 (rx_idx == (rx_start_idx + num_pkts)))
10157 break;
10158 }
10159
10160 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10161 dev_kfree_skb(skb);
10162
Michael Chan9f40dea2005-09-05 17:53:06 -070010163 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010164 goto out;
10165
10166 if (rx_idx != rx_start_idx + num_pkts)
10167 goto out;
10168
10169 desc = &tp->rx_rcb[rx_start_idx];
10170 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10171 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10172 if (opaque_key != RXD_OPAQUE_RING_STD)
10173 goto out;
10174
10175 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10176 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10177 goto out;
10178
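	/* The hardware-reported length includes the trailing 4-byte FCS,
	 * which the transmit side never supplied; strip it before comparing.
	 */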
10179 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10180 if (rx_len != tx_len)
10181 goto out;
10182
10183 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10184
10185 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10186 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10187
10188 for (i = 14; i < tx_len; i++) {
10189 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10190 goto out;
10191 }
10192 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010193
Michael Chanc76949a2005-05-29 14:58:59 -070010194 /* tg3_free_rings will unmap and free the rx_skb */
10195out:
10196 return err;
10197}
10198
Michael Chan9f40dea2005-09-05 17:53:06 -070010199#define TG3_MAC_LOOPBACK_FAILED 1
10200#define TG3_PHY_LOOPBACK_FAILED 2
10201#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10202 TG3_PHY_LOOPBACK_FAILED)
10203
10204static int tg3_test_loopback(struct tg3 *tp)
10205{
10206 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010207 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010208
10209 if (!netif_running(tp->dev))
10210 return TG3_LOOPBACK_FAILED;
10211
Michael Chanb9ec6c12006-07-25 16:37:27 -070010212 err = tg3_reset_hw(tp, 1);
10213 if (err)
10214 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010215
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010219 int i;
10220 u32 status;
10221
10222 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10223
10224 /* Wait for up to 40 microseconds to acquire lock. */
10225 for (i = 0; i < 4; i++) {
10226 status = tr32(TG3_CPMU_MUTEX_GNT);
10227 if (status == CPMU_MUTEX_GNT_DRIVER)
10228 break;
10229 udelay(10);
10230 }
10231
10232 if (status != CPMU_MUTEX_GNT_DRIVER)
10233 return TG3_LOOPBACK_FAILED;
10234
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010235 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010236 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010237 tw32(TG3_CPMU_CTRL,
10238 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10239 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010240 }
10241
Michael Chan9f40dea2005-09-05 17:53:06 -070010242 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10243 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010244
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10247 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010248 tw32(TG3_CPMU_CTRL, cpmuctrl);
10249
10250 /* Release the mutex */
10251 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10252 }
10253
Matt Carlsondd477002008-05-25 23:45:58 -070010254 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10255 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010256 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10257 err |= TG3_PHY_LOOPBACK_FAILED;
10258 }
10259
10260 return err;
10261}
10262
Michael Chan4cafd3f2005-05-29 14:56:34 -070010263static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10264 u64 *data)
10265{
Michael Chan566f86a2005-05-29 14:56:58 -070010266 struct tg3 *tp = netdev_priv(dev);
10267
Michael Chanbc1c7562006-03-20 17:48:03 -080010268 if (tp->link_config.phy_is_low_power)
10269 tg3_set_power_state(tp, PCI_D0);
10270
Michael Chan566f86a2005-05-29 14:56:58 -070010271 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10272
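	/* ethtool test result slots, as filled in below:
	 *	data[0] - NVRAM test
	 *	data[1] - link test
	 *	data[2] - register read/write test	(offline only)
	 *	data[3] - internal memory test		(offline only)
	 *	data[4] - MAC/PHY loopback tests	(offline only)
	 *	data[5] - interrupt test		(offline only)
	 */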
10273 if (tg3_test_nvram(tp) != 0) {
10274 etest->flags |= ETH_TEST_FL_FAILED;
10275 data[0] = 1;
10276 }
Michael Chanca430072005-05-29 14:57:23 -070010277 if (tg3_test_link(tp) != 0) {
10278 etest->flags |= ETH_TEST_FL_FAILED;
10279 data[1] = 1;
10280 }
Michael Chana71116d2005-05-29 14:58:11 -070010281 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010282 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010283
Michael Chanbbe832c2005-06-24 20:20:04 -070010284 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010285 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010286 tg3_netif_stop(tp);
10287 irq_sync = 1;
10288 }
10289
10290 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010291
10292 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010293 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010294 tg3_halt_cpu(tp, RX_CPU_BASE);
10295 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10296 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010297 if (!err)
10298 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010299
Michael Chand9ab5ad2006-03-20 22:27:35 -080010300 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10301 tg3_phy_reset(tp);
10302
Michael Chana71116d2005-05-29 14:58:11 -070010303 if (tg3_test_registers(tp) != 0) {
10304 etest->flags |= ETH_TEST_FL_FAILED;
10305 data[2] = 1;
10306 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010307 if (tg3_test_memory(tp) != 0) {
10308 etest->flags |= ETH_TEST_FL_FAILED;
10309 data[3] = 1;
10310 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010311 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010312 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010313
David S. Millerf47c11e2005-06-24 20:18:35 -070010314 tg3_full_unlock(tp);
10315
Michael Chand4bc3922005-05-29 14:59:20 -070010316 if (tg3_test_interrupt(tp) != 0) {
10317 etest->flags |= ETH_TEST_FL_FAILED;
10318 data[5] = 1;
10319 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010320
10321 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010322
Michael Chana71116d2005-05-29 14:58:11 -070010323 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10324 if (netif_running(dev)) {
10325 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010326 err2 = tg3_restart_hw(tp, 1);
10327 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010328 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010329 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010330
10331 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010332
10333 if (irq_sync && !err2)
10334 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010335 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010336 if (tp->link_config.phy_is_low_power)
10337 tg3_set_power_state(tp, PCI_D3hot);
10338
Michael Chan4cafd3f2005-05-29 14:56:34 -070010339}
10340
Linus Torvalds1da177e2005-04-16 15:20:36 -070010341static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10342{
10343 struct mii_ioctl_data *data = if_mii(ifr);
10344 struct tg3 *tp = netdev_priv(dev);
10345 int err;
10346
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010347 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10348 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10349 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010350 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010351 }
10352
Linus Torvalds1da177e2005-04-16 15:20:36 -070010353	switch (cmd) {
10354 case SIOCGMIIPHY:
10355 data->phy_id = PHY_ADDR;
10356
10357 /* fallthru */
10358 case SIOCGMIIREG: {
10359 u32 mii_regval;
10360
10361 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10362 break; /* We have no PHY */
10363
Michael Chanbc1c7562006-03-20 17:48:03 -080010364 if (tp->link_config.phy_is_low_power)
10365 return -EAGAIN;
10366
David S. Millerf47c11e2005-06-24 20:18:35 -070010367 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010368 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010369 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010370
10371 data->val_out = mii_regval;
10372
10373 return err;
10374 }
10375
10376 case SIOCSMIIREG:
10377 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10378 break; /* We have no PHY */
10379
10380 if (!capable(CAP_NET_ADMIN))
10381 return -EPERM;
10382
Michael Chanbc1c7562006-03-20 17:48:03 -080010383 if (tp->link_config.phy_is_low_power)
10384 return -EAGAIN;
10385
David S. Millerf47c11e2005-06-24 20:18:35 -070010386 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010387 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010388 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010389
10390 return err;
10391
10392 default:
10393 /* do nothing */
10394 break;
10395 }
10396 return -EOPNOTSUPP;
10397}
10398
10399#if TG3_VLAN_TAG_USED
10400static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10401{
10402 struct tg3 *tp = netdev_priv(dev);
10403
Michael Chan29315e82006-06-29 20:12:30 -070010404 if (netif_running(dev))
10405 tg3_netif_stop(tp);
10406
David S. Millerf47c11e2005-06-24 20:18:35 -070010407 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010408
10409 tp->vlgrp = grp;
10410
10411 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10412 __tg3_set_rx_mode(dev);
10413
Michael Chan29315e82006-06-29 20:12:30 -070010414 if (netif_running(dev))
10415 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010416
10417 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010418}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010419#endif
10420
David S. Miller15f98502005-05-18 22:49:26 -070010421static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10422{
10423 struct tg3 *tp = netdev_priv(dev);
10424
10425 memcpy(ec, &tp->coal, sizeof(*ec));
10426 return 0;
10427}
10428
Michael Chand244c892005-07-05 14:42:33 -070010429static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10430{
10431 struct tg3 *tp = netdev_priv(dev);
10432 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10433 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10434
10435 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10436 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10437 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10438 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10439 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10440 }
10441
10442 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10443 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10444 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10445 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10446 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10447 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10448 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10449 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10450 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10451 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10452 return -EINVAL;
10453
10454 /* No rx interrupts will be generated if both are zero */
10455 if ((ec->rx_coalesce_usecs == 0) &&
10456 (ec->rx_max_coalesced_frames == 0))
10457 return -EINVAL;
10458
10459 /* No tx interrupts will be generated if both are zero */
10460 if ((ec->tx_coalesce_usecs == 0) &&
10461 (ec->tx_max_coalesced_frames == 0))
10462 return -EINVAL;
10463
10464 /* Only copy relevant parameters, ignore all others. */
10465 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10466 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10467 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10468 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10469 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10470 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10471 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10472 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10473 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10474
10475 if (netif_running(dev)) {
10476 tg3_full_lock(tp, 0);
10477 __tg3_set_coalesce(tp, &tp->coal);
10478 tg3_full_unlock(tp);
10479 }
10480 return 0;
10481}
10482
Jeff Garzik7282d492006-09-13 14:30:00 -040010483static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010484 .get_settings = tg3_get_settings,
10485 .set_settings = tg3_set_settings,
10486 .get_drvinfo = tg3_get_drvinfo,
10487 .get_regs_len = tg3_get_regs_len,
10488 .get_regs = tg3_get_regs,
10489 .get_wol = tg3_get_wol,
10490 .set_wol = tg3_set_wol,
10491 .get_msglevel = tg3_get_msglevel,
10492 .set_msglevel = tg3_set_msglevel,
10493 .nway_reset = tg3_nway_reset,
10494 .get_link = ethtool_op_get_link,
10495 .get_eeprom_len = tg3_get_eeprom_len,
10496 .get_eeprom = tg3_get_eeprom,
10497 .set_eeprom = tg3_set_eeprom,
10498 .get_ringparam = tg3_get_ringparam,
10499 .set_ringparam = tg3_set_ringparam,
10500 .get_pauseparam = tg3_get_pauseparam,
10501 .set_pauseparam = tg3_set_pauseparam,
10502 .get_rx_csum = tg3_get_rx_csum,
10503 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010504 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010505 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010506 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010507 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010508 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010509 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010510 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010511 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010512 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010513 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010514};
10515
10516static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10517{
Michael Chan1b277772006-03-20 22:27:48 -080010518 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010519
10520 tp->nvram_size = EEPROM_CHIP_SIZE;
10521
Michael Chan18201802006-03-20 22:29:15 -080010522 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010523 return;
10524
Michael Chanb16250e2006-09-27 16:10:14 -070010525 if ((magic != TG3_EEPROM_MAGIC) &&
10526 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10527 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010528 return;
10529
10530 /*
10531 * Size the chip by reading offsets at increasing powers of two.
10532 * When we encounter our validation signature, we know the addressing
10533 * has wrapped around, and thus have our chip size.
10534 */
Michael Chan1b277772006-03-20 22:27:48 -080010535 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010536
10537 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010538 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010539 return;
10540
Michael Chan18201802006-03-20 22:29:15 -080010541 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010542 break;
10543
10544 cursize <<= 1;
10545 }
10546
10547 tp->nvram_size = cursize;
10548}
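
/* Worked example of the probe above (illustrative only): a 16 KB serial
 * EEPROM ignores address bits beyond its size, so the read at offset 0x4000
 * aliases offset 0 and returns the validation signature; cursize (0x4000)
 * is then reported as the chip size.
 */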
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010549
Linus Torvalds1da177e2005-04-16 15:20:36 -070010550static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10551{
10552 u32 val;
10553
Michael Chan18201802006-03-20 22:29:15 -080010554 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010555 return;
10556
10557 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010558 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010559 tg3_get_eeprom_size(tp);
10560 return;
10561 }
10562
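	/* NVRAM images in the standard (non-selfboot) format store their
	 * size, in kilobytes, in the upper 16 bits of the word at offset
	 * 0xf0; fall back to a fixed 512 KB if that word is zero or
	 * unreadable.
	 */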
Linus Torvalds1da177e2005-04-16 15:20:36 -070010563 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10564 if (val != 0) {
10565 tp->nvram_size = (val >> 16) * 1024;
10566 return;
10567 }
10568 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010569 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010570}
10571
10572static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10573{
10574 u32 nvcfg1;
10575
10576 nvcfg1 = tr32(NVRAM_CFG1);
10577 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10578 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10579 }
10580 else {
10581 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10582 tw32(NVRAM_CFG1, nvcfg1);
10583 }
10584
Michael Chan4c987482005-09-05 17:52:38 -070010585 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010586 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010587 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10588 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10589 tp->nvram_jedecnum = JEDEC_ATMEL;
10590 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10591 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10592 break;
10593 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10594 tp->nvram_jedecnum = JEDEC_ATMEL;
10595 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10596 break;
10597 case FLASH_VENDOR_ATMEL_EEPROM:
10598 tp->nvram_jedecnum = JEDEC_ATMEL;
10599 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10600 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10601 break;
10602 case FLASH_VENDOR_ST:
10603 tp->nvram_jedecnum = JEDEC_ST;
10604 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10605 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10606 break;
10607 case FLASH_VENDOR_SAIFUN:
10608 tp->nvram_jedecnum = JEDEC_SAIFUN;
10609 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10610 break;
10611 case FLASH_VENDOR_SST_SMALL:
10612 case FLASH_VENDOR_SST_LARGE:
10613 tp->nvram_jedecnum = JEDEC_SST;
10614 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10615 break;
10616 }
10617 }
10618 else {
10619 tp->nvram_jedecnum = JEDEC_ATMEL;
10620 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10621 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10622 }
10623}
10624
Michael Chan361b4ac2005-04-21 17:11:21 -070010625static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10626{
10627 u32 nvcfg1;
10628
10629 nvcfg1 = tr32(NVRAM_CFG1);
10630
Michael Chane6af3012005-04-21 17:12:05 -070010631 /* NVRAM protection for TPM */
10632 if (nvcfg1 & (1 << 27))
10633 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10634
Michael Chan361b4ac2005-04-21 17:11:21 -070010635 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10636 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10637 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10638 tp->nvram_jedecnum = JEDEC_ATMEL;
10639 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10640 break;
10641 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10642 tp->nvram_jedecnum = JEDEC_ATMEL;
10643 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10644 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10645 break;
10646 case FLASH_5752VENDOR_ST_M45PE10:
10647 case FLASH_5752VENDOR_ST_M45PE20:
10648 case FLASH_5752VENDOR_ST_M45PE40:
10649 tp->nvram_jedecnum = JEDEC_ST;
10650 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10651 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10652 break;
10653 }
10654
10655 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10656 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10657 case FLASH_5752PAGE_SIZE_256:
10658 tp->nvram_pagesize = 256;
10659 break;
10660 case FLASH_5752PAGE_SIZE_512:
10661 tp->nvram_pagesize = 512;
10662 break;
10663 case FLASH_5752PAGE_SIZE_1K:
10664 tp->nvram_pagesize = 1024;
10665 break;
10666 case FLASH_5752PAGE_SIZE_2K:
10667 tp->nvram_pagesize = 2048;
10668 break;
10669 case FLASH_5752PAGE_SIZE_4K:
10670 tp->nvram_pagesize = 4096;
10671 break;
10672 case FLASH_5752PAGE_SIZE_264:
10673 tp->nvram_pagesize = 264;
10674 break;
10675 }
10676 }
10677 else {
10678 /* For eeprom, set pagesize to maximum eeprom size */
10679 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10680
10681 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10682 tw32(NVRAM_CFG1, nvcfg1);
10683 }
10684}
10685
Michael Chand3c7b882006-03-23 01:28:25 -080010686static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10687{
Matt Carlson989a9d22007-05-05 11:51:05 -070010688 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010689
10690 nvcfg1 = tr32(NVRAM_CFG1);
10691
10692 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010693 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010694 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010695 protect = 1;
10696 }
Michael Chand3c7b882006-03-23 01:28:25 -080010697
Matt Carlson989a9d22007-05-05 11:51:05 -070010698 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10699 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010700 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10701 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10702 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010703 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010704 tp->nvram_jedecnum = JEDEC_ATMEL;
10705 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10706 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10707 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010708 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10709 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010710 tp->nvram_size = (protect ? 0x3e200 :
10711 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010712 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010713 tp->nvram_size = (protect ? 0x1f200 :
10714 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010715 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010716 tp->nvram_size = (protect ? 0x1f200 :
10717 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010718 break;
10719 case FLASH_5752VENDOR_ST_M45PE10:
10720 case FLASH_5752VENDOR_ST_M45PE20:
10721 case FLASH_5752VENDOR_ST_M45PE40:
10722 tp->nvram_jedecnum = JEDEC_ST;
10723 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10724 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10725 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010726 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010727 tp->nvram_size = (protect ?
10728 TG3_NVRAM_SIZE_64KB :
10729 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010730 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010731 tp->nvram_size = (protect ?
10732 TG3_NVRAM_SIZE_64KB :
10733 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010734 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010735 tp->nvram_size = (protect ?
10736 TG3_NVRAM_SIZE_128KB :
10737 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010738 break;
10739 }
10740}
10741
Michael Chan1b277772006-03-20 22:27:48 -080010742static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10743{
10744 u32 nvcfg1;
10745
10746 nvcfg1 = tr32(NVRAM_CFG1);
10747
10748 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10749 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10750 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10751 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10752 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10753 tp->nvram_jedecnum = JEDEC_ATMEL;
10754 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10755 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10756
10757 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10758 tw32(NVRAM_CFG1, nvcfg1);
10759 break;
10760 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10761 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10762 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10763 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10764 tp->nvram_jedecnum = JEDEC_ATMEL;
10765 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10766 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10767 tp->nvram_pagesize = 264;
10768 break;
10769 case FLASH_5752VENDOR_ST_M45PE10:
10770 case FLASH_5752VENDOR_ST_M45PE20:
10771 case FLASH_5752VENDOR_ST_M45PE40:
10772 tp->nvram_jedecnum = JEDEC_ST;
10773 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10774 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10775 tp->nvram_pagesize = 256;
10776 break;
10777 }
10778}
10779
Matt Carlson6b91fa02007-10-10 18:01:09 -070010780static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10781{
10782 u32 nvcfg1, protect = 0;
10783
10784 nvcfg1 = tr32(NVRAM_CFG1);
10785
10786 /* NVRAM protection for TPM */
10787 if (nvcfg1 & (1 << 27)) {
10788 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10789 protect = 1;
10790 }
10791
10792 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10793 switch (nvcfg1) {
10794 case FLASH_5761VENDOR_ATMEL_ADB021D:
10795 case FLASH_5761VENDOR_ATMEL_ADB041D:
10796 case FLASH_5761VENDOR_ATMEL_ADB081D:
10797 case FLASH_5761VENDOR_ATMEL_ADB161D:
10798 case FLASH_5761VENDOR_ATMEL_MDB021D:
10799 case FLASH_5761VENDOR_ATMEL_MDB041D:
10800 case FLASH_5761VENDOR_ATMEL_MDB081D:
10801 case FLASH_5761VENDOR_ATMEL_MDB161D:
10802 tp->nvram_jedecnum = JEDEC_ATMEL;
10803 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10804 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10805 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10806 tp->nvram_pagesize = 256;
10807 break;
10808 case FLASH_5761VENDOR_ST_A_M45PE20:
10809 case FLASH_5761VENDOR_ST_A_M45PE40:
10810 case FLASH_5761VENDOR_ST_A_M45PE80:
10811 case FLASH_5761VENDOR_ST_A_M45PE16:
10812 case FLASH_5761VENDOR_ST_M_M45PE20:
10813 case FLASH_5761VENDOR_ST_M_M45PE40:
10814 case FLASH_5761VENDOR_ST_M_M45PE80:
10815 case FLASH_5761VENDOR_ST_M_M45PE16:
10816 tp->nvram_jedecnum = JEDEC_ST;
10817 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10818 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10819 tp->nvram_pagesize = 256;
10820 break;
10821 }
10822
10823 if (protect) {
10824 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10825 } else {
10826 switch (nvcfg1) {
10827 case FLASH_5761VENDOR_ATMEL_ADB161D:
10828 case FLASH_5761VENDOR_ATMEL_MDB161D:
10829 case FLASH_5761VENDOR_ST_A_M45PE16:
10830 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010831 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010832 break;
10833 case FLASH_5761VENDOR_ATMEL_ADB081D:
10834 case FLASH_5761VENDOR_ATMEL_MDB081D:
10835 case FLASH_5761VENDOR_ST_A_M45PE80:
10836 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010837 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010838 break;
10839 case FLASH_5761VENDOR_ATMEL_ADB041D:
10840 case FLASH_5761VENDOR_ATMEL_MDB041D:
10841 case FLASH_5761VENDOR_ST_A_M45PE40:
10842 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010843 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010844 break;
10845 case FLASH_5761VENDOR_ATMEL_ADB021D:
10846 case FLASH_5761VENDOR_ATMEL_MDB021D:
10847 case FLASH_5761VENDOR_ST_A_M45PE20:
10848 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010849 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010850 break;
10851 }
10852 }
10853}
10854
Michael Chanb5d37722006-09-27 16:06:21 -070010855static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10856{
10857 tp->nvram_jedecnum = JEDEC_ATMEL;
10858 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10859 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10860}
10861
Linus Torvalds1da177e2005-04-16 15:20:36 -070010862/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10863static void __devinit tg3_nvram_init(struct tg3 *tp)
10864{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010865 tw32_f(GRC_EEPROM_ADDR,
10866 (EEPROM_ADDR_FSM_RESET |
10867 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10868 EEPROM_ADDR_CLKPERD_SHIFT)));
10869
Michael Chan9d57f012006-12-07 00:23:25 -080010870 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010871
10872 /* Enable seeprom accesses. */
10873 tw32_f(GRC_LOCAL_CTRL,
10874 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10875 udelay(100);
10876
10877 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10878 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10879 tp->tg3_flags |= TG3_FLAG_NVRAM;
10880
Michael Chanec41c7d2006-01-17 02:40:55 -080010881 if (tg3_nvram_lock(tp)) {
10882 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10883 "tg3_nvram_init failed.\n", tp->dev->name);
10884 return;
10885 }
Michael Chane6af3012005-04-21 17:12:05 -070010886 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010887
Matt Carlson989a9d22007-05-05 11:51:05 -070010888 tp->nvram_size = 0;
10889
Michael Chan361b4ac2005-04-21 17:11:21 -070010890 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10891 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010892 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10893 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010894 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10896 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010897 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010898 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10899 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010900 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10901 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010902 else
10903 tg3_get_nvram_info(tp);
10904
Matt Carlson989a9d22007-05-05 11:51:05 -070010905 if (tp->nvram_size == 0)
10906 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010907
Michael Chane6af3012005-04-21 17:12:05 -070010908 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010909 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010910
10911 } else {
10912 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10913
10914 tg3_get_eeprom_size(tp);
10915 }
10916}
10917
10918static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10919 u32 offset, u32 *val)
10920{
10921 u32 tmp;
10922 int i;
10923
10924 if (offset > EEPROM_ADDR_ADDR_MASK ||
10925 (offset % 4) != 0)
10926 return -EINVAL;
10927
10928 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10929 EEPROM_ADDR_DEVID_MASK |
10930 EEPROM_ADDR_READ);
10931 tw32(GRC_EEPROM_ADDR,
10932 tmp |
10933 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10934 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10935 EEPROM_ADDR_ADDR_MASK) |
10936 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10937
Michael Chan9d57f012006-12-07 00:23:25 -080010938 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010939 tmp = tr32(GRC_EEPROM_ADDR);
10940
10941 if (tmp & EEPROM_ADDR_COMPLETE)
10942 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010943 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010944 }
10945 if (!(tmp & EEPROM_ADDR_COMPLETE))
10946 return -EBUSY;
10947
10948 *val = tr32(GRC_EEPROM_DATA);
10949 return 0;
10950}
10951
10952#define NVRAM_CMD_TIMEOUT 10000
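/* 10,000 polls of 10 usec each in tg3_nvram_exec_cmd() below, i.e. a
 * worst-case wait of roughly 100 ms for NVRAM_CMD_DONE.
 */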
10953
10954static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10955{
10956 int i;
10957
10958 tw32(NVRAM_CMD, nvram_cmd);
10959 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10960 udelay(10);
10961 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10962 udelay(10);
10963 break;
10964 }
10965 }
10966 if (i == NVRAM_CMD_TIMEOUT) {
10967 return -EBUSY;
10968 }
10969 return 0;
10970}
10971
Michael Chan18201802006-03-20 22:29:15 -080010972static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10973{
10974 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10975 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10976 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010977 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010978 (tp->nvram_jedecnum == JEDEC_ATMEL))
10979
10980 addr = ((addr / tp->nvram_pagesize) <<
10981 ATMEL_AT45DB0X1B_PAGE_POS) +
10982 (addr % tp->nvram_pagesize);
10983
10984 return addr;
10985}
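
/* Example of the translation above, assuming the usual Atmel AT45DB
 * geometry (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *	linear addr 1000  ->  page 3 (1000 / 264), offset 208 (1000 % 264)
 *	                  ->  physical addr (3 << 9) + 208 = 1744
 *
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */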
10986
Michael Chanc4e65752006-03-20 22:29:32 -080010987static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10988{
10989 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10990 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10991 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010992 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010993 (tp->nvram_jedecnum == JEDEC_ATMEL))
10994
10995 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10996 tp->nvram_pagesize) +
10997 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10998
10999 return addr;
11000}
11001
Linus Torvalds1da177e2005-04-16 15:20:36 -070011002static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11003{
11004 int ret;
11005
Linus Torvalds1da177e2005-04-16 15:20:36 -070011006 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11007 return tg3_nvram_read_using_eeprom(tp, offset, val);
11008
Michael Chan18201802006-03-20 22:29:15 -080011009 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011010
11011 if (offset > NVRAM_ADDR_MSK)
11012 return -EINVAL;
11013
Michael Chanec41c7d2006-01-17 02:40:55 -080011014 ret = tg3_nvram_lock(tp);
11015 if (ret)
11016 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011017
Michael Chane6af3012005-04-21 17:12:05 -070011018 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011019
11020 tw32(NVRAM_ADDR, offset);
11021 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11022 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11023
11024 if (ret == 0)
11025 *val = swab32(tr32(NVRAM_RDDATA));
11026
Michael Chane6af3012005-04-21 17:12:05 -070011027 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011028
Michael Chan381291b2005-12-13 21:08:21 -080011029 tg3_nvram_unlock(tp);
11030
Linus Torvalds1da177e2005-04-16 15:20:36 -070011031 return ret;
11032}
11033
Al Virob9fc7dc2007-12-17 22:59:57 -080011034static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11035{
11036 u32 v;
11037 int res = tg3_nvram_read(tp, offset, &v);
11038 if (!res)
11039 *val = cpu_to_le32(v);
11040 return res;
11041}
11042
Michael Chan18201802006-03-20 22:29:15 -080011043static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11044{
11045 int err;
11046 u32 tmp;
11047
11048 err = tg3_nvram_read(tp, offset, &tmp);
11049 *val = swab32(tmp);
11050 return err;
11051}
11052
Linus Torvalds1da177e2005-04-16 15:20:36 -070011053static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11054 u32 offset, u32 len, u8 *buf)
11055{
11056 int i, j, rc = 0;
11057 u32 val;
11058
11059 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011060 u32 addr;
11061 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011062
11063 addr = offset + i;
11064
11065 memcpy(&data, buf + i, 4);
11066
Al Virob9fc7dc2007-12-17 22:59:57 -080011067 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011068
11069 val = tr32(GRC_EEPROM_ADDR);
11070 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11071
11072 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11073 EEPROM_ADDR_READ);
11074 tw32(GRC_EEPROM_ADDR, val |
11075 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11076 (addr & EEPROM_ADDR_ADDR_MASK) |
11077 EEPROM_ADDR_START |
11078 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011079
Michael Chan9d57f012006-12-07 00:23:25 -080011080 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011081 val = tr32(GRC_EEPROM_ADDR);
11082
11083 if (val & EEPROM_ADDR_COMPLETE)
11084 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011085 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011086 }
11087 if (!(val & EEPROM_ADDR_COMPLETE)) {
11088 rc = -EBUSY;
11089 break;
11090 }
11091 }
11092
11093 return rc;
11094}
11095
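/* Summary of the unbuffered (non-self-erasing) flash path below: each
 * affected page is read into a bounce buffer, patched with the new data,
 * explicitly erased after a "write enable" command, and then re-programmed
 * one word at a time, with NVRAM_CMD_FIRST/NVRAM_CMD_LAST marking the page
 * boundaries.
 */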
11096/* offset and length are dword aligned */
11097static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11098 u8 *buf)
11099{
11100 int ret = 0;
11101 u32 pagesize = tp->nvram_pagesize;
11102 u32 pagemask = pagesize - 1;
11103 u32 nvram_cmd;
11104 u8 *tmp;
11105
11106 tmp = kmalloc(pagesize, GFP_KERNEL);
11107 if (tmp == NULL)
11108 return -ENOMEM;
11109
11110 while (len) {
11111 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011112 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011113
11114 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011115
Linus Torvalds1da177e2005-04-16 15:20:36 -070011116 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011117 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011118 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011119 break;
11120 }
11121 if (ret)
11122 break;
11123
11124 page_off = offset & pagemask;
11125 size = pagesize;
11126 if (len < size)
11127 size = len;
11128
11129 len -= size;
11130
11131 memcpy(tmp + page_off, buf, size);
11132
11133 offset = offset + (pagesize - page_off);
11134
Michael Chane6af3012005-04-21 17:12:05 -070011135 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011136
11137 /*
11138 * Before we can erase the flash page, we need
11139 * to issue a special "write enable" command.
11140 */
11141 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11142
11143 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11144 break;
11145
11146 /* Erase the target page */
11147 tw32(NVRAM_ADDR, phy_addr);
11148
11149 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11150 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11151
11152 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11153 break;
11154
11155 /* Issue another write enable to start the write. */
11156 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11157
11158 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11159 break;
11160
11161 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011162 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011163
Al Virob9fc7dc2007-12-17 22:59:57 -080011164 data = *((__be32 *) (tmp + j));
11165 /* swab32(le32_to_cpu(data)), actually */
11166 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011167
11168 tw32(NVRAM_ADDR, phy_addr + j);
11169
11170 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11171 NVRAM_CMD_WR;
11172
11173 if (j == 0)
11174 nvram_cmd |= NVRAM_CMD_FIRST;
11175 else if (j == (pagesize - 4))
11176 nvram_cmd |= NVRAM_CMD_LAST;
11177
11178 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11179 break;
11180 }
11181 if (ret)
11182 break;
11183 }
11184
11185 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11186 tg3_nvram_exec_cmd(tp, nvram_cmd);
11187
11188 kfree(tmp);
11189
11190 return ret;
11191}
11192
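/* In contrast to the unbuffered path above, no explicit erase is issued
 * here; the buffered parts are expected to manage their own erase cycle,
 * and the driver only marks page and transfer boundaries with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST (see the page_off checks below).
 */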
11193/* offset and length are dword aligned */
11194static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11195 u8 *buf)
11196{
11197 int i, ret = 0;
11198
11199 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011200 u32 page_off, phy_addr, nvram_cmd;
11201 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011202
11203 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011204 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011205
11206 page_off = offset % tp->nvram_pagesize;
11207
Michael Chan18201802006-03-20 22:29:15 -080011208 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011209
11210 tw32(NVRAM_ADDR, phy_addr);
11211
11212 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11213
11214 if ((page_off == 0) || (i == 0))
11215 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011216 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011217 nvram_cmd |= NVRAM_CMD_LAST;
11218
11219 if (i == (len - 4))
11220 nvram_cmd |= NVRAM_CMD_LAST;
11221
Michael Chan4c987482005-09-05 17:52:38 -070011222 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011223 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011224 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011225 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011226 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011227 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011228 (tp->nvram_jedecnum == JEDEC_ST) &&
11229 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011230
11231 if ((ret = tg3_nvram_exec_cmd(tp,
11232 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11233 NVRAM_CMD_DONE)))
11234
11235 break;
11236 }
11237 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11238 /* We always do complete word writes to eeprom. */
11239 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11240 }
11241
11242 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11243 break;
11244 }
11245 return ret;
11246}
11247
11248/* offset and length are dword aligned */
11249static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11250{
11251 int ret;
11252
Linus Torvalds1da177e2005-04-16 15:20:36 -070011253 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011254 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11255 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011256 udelay(40);
11257 }
11258
11259 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11260 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11261 }
11262 else {
11263 u32 grc_mode;
11264
Michael Chanec41c7d2006-01-17 02:40:55 -080011265 ret = tg3_nvram_lock(tp);
11266 if (ret)
11267 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011268
Michael Chane6af3012005-04-21 17:12:05 -070011269 tg3_enable_nvram_access(tp);
11270 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11271 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011272 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011273
11274 grc_mode = tr32(GRC_MODE);
11275 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11276
11277 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11278 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11279
11280 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11281 buf);
11282 }
11283 else {
11284 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11285 buf);
11286 }
11287
11288 grc_mode = tr32(GRC_MODE);
11289 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11290
Michael Chane6af3012005-04-21 17:12:05 -070011291 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011292 tg3_nvram_unlock(tp);
11293 }
11294
11295 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011296 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011297 udelay(40);
11298 }
11299
11300 return ret;
11301}
11302
11303struct subsys_tbl_ent {
11304 u16 subsys_vendor, subsys_devid;
11305 u32 phy_id;
11306};
11307
11308static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11309 /* Broadcom boards. */
11310 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11311 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11312 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11313 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11314 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11315 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11316 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11317 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11318 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11319 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11320 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11321
11322 /* 3com boards. */
11323 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11324 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11325 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11326 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11327 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11328
11329 /* DELL boards. */
11330 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11331 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11332 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11333 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11334
11335 /* Compaq boards. */
11336 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11337 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11338 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11339 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11340 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11341
11342 /* IBM boards. */
11343 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11344};
11345
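/* Fallback used by tg3_phy_probe(): when NVRAM carries no valid signature,
 * the PCI subsystem vendor/device ID is matched against the table above to
 * pick a PHY ID for the board.
 */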
11346static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11347{
11348 int i;
11349
11350 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11351 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11352 tp->pdev->subsystem_vendor) &&
11353 (subsys_id_to_phy_id[i].subsys_devid ==
11354 tp->pdev->subsystem_device))
11355 return &subsys_id_to_phy_id[i];
11356 }
11357 return NULL;
11358}
11359
Michael Chan7d0c41e2005-04-21 17:06:20 -070011360static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011361{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011362 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011363 u16 pmcsr;
11364
11365 /* On some early chips the SRAM cannot be accessed in D3hot state,
 11366	 * so we need to make sure we're in D0.
11367 */
11368 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11369 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11370 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11371 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011372
11373 /* Make sure register accesses (indirect or otherwise)
11374 * will function correctly.
11375 */
11376 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11377 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011378
David S. Millerf49639e2006-06-09 11:58:36 -070011379 /* The memory arbiter has to be enabled in order for SRAM accesses
11380 * to succeed. Normally on powerup the tg3 chip firmware will make
11381 * sure it is enabled, but other entities such as system netboot
11382 * code might disable it.
11383 */
11384 val = tr32(MEMARB_MODE);
11385 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11386
Linus Torvalds1da177e2005-04-16 15:20:36 -070011387 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011388 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11389
Gary Zambranoa85feb82007-05-05 11:52:19 -070011390 /* Assume an onboard device and WOL capable by default. */
11391 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011392
Michael Chanb5d37722006-09-27 16:06:21 -070011393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011394 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011395 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011396 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11397 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011398 val = tr32(VCPU_CFGSHDW);
11399 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011400 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011401 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011402 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11403 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011404 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011405 goto done;
Michael Chanb5d37722006-09-27 16:06:21 -070011406 }
11407
Linus Torvalds1da177e2005-04-16 15:20:36 -070011408 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11409 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11410 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011411 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011412 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011413
11414 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11415 tp->nic_sram_data_cfg = nic_cfg;
11416
11417 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11418 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11419 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11420 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11421 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11422 (ver > 0) && (ver < 0x100))
11423 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11424
Matt Carlsona9daf362008-05-25 23:49:44 -070011425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11426 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11427
Linus Torvalds1da177e2005-04-16 15:20:36 -070011428 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11429 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11430 eeprom_phy_serdes = 1;
11431
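		/* The bootcode leaves the PHY ID in SRAM as two words; fold
		 * them into the driver's internal phy_id layout, the same
		 * packing tg3_phy_probe() builds from MII_PHYSID1/2.
		 */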
11432 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11433 if (nic_phy_id != 0) {
11434 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11435 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11436
11437 eeprom_phy_id = (id1 >> 16) << 10;
11438 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11439 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11440 } else
11441 eeprom_phy_id = 0;
11442
Michael Chan7d0c41e2005-04-21 17:06:20 -070011443 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011444 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011445 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011446 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11447 else
11448 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11449 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011450
John W. Linvillecbf46852005-04-21 17:01:29 -070011451 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011452 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11453 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011454 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011455 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11456
11457 switch (led_cfg) {
11458 default:
11459 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11460 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11461 break;
11462
11463 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11464 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11465 break;
11466
11467 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11468 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011469
11470 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11471 * read on some older 5700/5701 bootcode.
11472 */
11473 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11474 ASIC_REV_5700 ||
11475 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11476 ASIC_REV_5701)
11477 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11478
Linus Torvalds1da177e2005-04-16 15:20:36 -070011479 break;
11480
11481 case SHASTA_EXT_LED_SHARED:
11482 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11483 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11484 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11485 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11486 LED_CTRL_MODE_PHY_2);
11487 break;
11488
11489 case SHASTA_EXT_LED_MAC:
11490 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11491 break;
11492
11493 case SHASTA_EXT_LED_COMBO:
11494 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11495 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11496 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11497 LED_CTRL_MODE_PHY_2);
11498 break;
11499
Stephen Hemminger855e1112008-04-16 16:37:28 -070011500 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011501
11502 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11503 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11504 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11505 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11506
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011507 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11508 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011509
Michael Chan9d26e212006-12-07 00:21:14 -080011510 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011511 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011512 if ((tp->pdev->subsystem_vendor ==
11513 PCI_VENDOR_ID_ARIMA) &&
11514 (tp->pdev->subsystem_device == 0x205a ||
11515 tp->pdev->subsystem_device == 0x2063))
11516 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11517 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011518 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011519 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11520 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011521
11522 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11523 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011524 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011525 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11526 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011527
11528 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11529 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011530 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011531
Gary Zambranoa85feb82007-05-05 11:52:19 -070011532 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11533 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11534 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011535
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011536 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011537 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
Matt Carlson0527ba32007-10-10 18:03:30 -070011538 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11539
Linus Torvalds1da177e2005-04-16 15:20:36 -070011540 if (cfg2 & (1 << 17))
11541 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11542
 11543	 /* SerDes signal pre-emphasis in register 0x590 is set by the */
 11544	 /* bootcode if bit 18 is set. */
11545 if (cfg2 & (1 << 18))
11546 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011547
11548 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11549 u32 cfg3;
11550
11551 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11552 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11553 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11554 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011555
11556 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11557 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11558 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11559 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11560 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11561 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011562 }
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011563done:
11564 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11565 device_set_wakeup_enable(&tp->pdev->dev,
11566 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011567}
11568
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011569static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11570{
11571 int i;
11572 u32 val;
11573
11574 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11575 tw32(OTP_CTRL, cmd);
11576
11577 /* Wait for up to 1 ms for command to execute. */
11578 for (i = 0; i < 100; i++) {
11579 val = tr32(OTP_STATUS);
11580 if (val & OTP_STATUS_CMD_DONE)
11581 break;
11582 udelay(10);
11583 }
11584
11585 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11586}
11587
11588/* Read the gphy configuration from the OTP region of the chip. The gphy
11589 * configuration is a 32-bit value that straddles the alignment boundary.
11590 * We do two 32-bit reads and then shift and merge the results.
11591 */
11592static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11593{
11594 u32 bhalf_otp, thalf_otp;
11595
11596 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11597
11598 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11599 return 0;
11600
11601 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11602
11603 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11604 return 0;
11605
11606 thalf_otp = tr32(OTP_READ_DATA);
11607
11608 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11609
11610 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11611 return 0;
11612
11613 bhalf_otp = tr32(OTP_READ_DATA);
11614
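	/* The 32-bit gphy config straddles the two OTP words just read: the
	 * low half of the first read supplies the upper 16 bits and the high
	 * half of the second read supplies the lower 16 bits.
	 */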
11615 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11616}
11617
Michael Chan7d0c41e2005-04-21 17:06:20 -070011618static int __devinit tg3_phy_probe(struct tg3 *tp)
11619{
11620 u32 hw_phy_id_1, hw_phy_id_2;
11621 u32 hw_phy_id, hw_phy_id_masked;
11622 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011623
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011624 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11625 return tg3_phy_init(tp);
11626
Linus Torvalds1da177e2005-04-16 15:20:36 -070011627 /* Reading the PHY ID register can conflict with ASF
 11628	 * firmware access to the PHY hardware.
11629 */
11630 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011631 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11632 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011633 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11634 } else {
11635 /* Now read the physical PHY_ID from the chip and verify
11636 * that it is sane. If it doesn't look good, we fall back
11637 * to either the hard-coded table based PHY_ID and failing
11638 * that the value found in the eeprom area.
11639 */
11640 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11641 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11642
11643 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11644 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11645 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11646
11647 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11648 }
11649
11650 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11651 tp->phy_id = hw_phy_id;
11652 if (hw_phy_id_masked == PHY_ID_BCM8002)
11653 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011654 else
11655 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011656 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011657 if (tp->phy_id != PHY_ID_INVALID) {
11658 /* Do nothing, phy ID already set up in
11659 * tg3_get_eeprom_hw_cfg().
11660 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011661 } else {
11662 struct subsys_tbl_ent *p;
11663
11664 /* No eeprom signature? Try the hardcoded
11665 * subsys device table.
11666 */
11667 p = lookup_by_subsys(tp);
11668 if (!p)
11669 return -ENODEV;
11670
11671 tp->phy_id = p->phy_id;
11672 if (!tp->phy_id ||
11673 tp->phy_id == PHY_ID_BCM8002)
11674 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11675 }
11676 }
11677
Michael Chan747e8f82005-07-25 12:33:22 -070011678 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011679 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011680 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011681 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011682
11683 tg3_readphy(tp, MII_BMSR, &bmsr);
11684 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11685 (bmsr & BMSR_LSTATUS))
11686 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011687
Linus Torvalds1da177e2005-04-16 15:20:36 -070011688 err = tg3_phy_reset(tp);
11689 if (err)
11690 return err;
11691
11692 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11693 ADVERTISE_100HALF | ADVERTISE_100FULL |
11694 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11695 tg3_ctrl = 0;
11696 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11697 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11698 MII_TG3_CTRL_ADV_1000_FULL);
11699 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11700 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11701 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11702 MII_TG3_CTRL_ENABLE_AS_MASTER);
11703 }
11704
Michael Chan3600d912006-12-07 00:21:48 -080011705 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11706 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11707 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11708 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011709 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11710
11711 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11712 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11713
11714 tg3_writephy(tp, MII_BMCR,
11715 BMCR_ANENABLE | BMCR_ANRESTART);
11716 }
11717 tg3_phy_set_wirespeed(tp);
11718
11719 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11720 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11721 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11722 }
11723
11724skip_phy_reset:
11725 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11726 err = tg3_init_5401phy_dsp(tp);
11727 if (err)
11728 return err;
11729 }
11730
11731 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11732 err = tg3_init_5401phy_dsp(tp);
11733 }
11734
Michael Chan747e8f82005-07-25 12:33:22 -070011735 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011736 tp->link_config.advertising =
11737 (ADVERTISED_1000baseT_Half |
11738 ADVERTISED_1000baseT_Full |
11739 ADVERTISED_Autoneg |
11740 ADVERTISED_FIBRE);
11741 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11742 tp->link_config.advertising &=
11743 ~(ADVERTISED_1000baseT_Half |
11744 ADVERTISED_1000baseT_Full);
11745
11746 return err;
11747}
11748
11749static void __devinit tg3_read_partno(struct tg3 *tp)
11750{
11751 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011752 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011753 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011754
Michael Chan18201802006-03-20 22:29:15 -080011755 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011756 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011757
Michael Chan18201802006-03-20 22:29:15 -080011758 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011759 for (i = 0; i < 256; i += 4) {
11760 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011761
Michael Chan1b277772006-03-20 22:27:48 -080011762 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11763 goto out_not_found;
11764
11765 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11766 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11767 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11768 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11769 }
11770 } else {
11771 int vpd_cap;
11772
11773 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11774 for (i = 0; i < 256; i += 4) {
11775 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011776 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011777 u16 tmp16;
11778
11779 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11780 i);
11781 while (j++ < 100) {
11782 pci_read_config_word(tp->pdev, vpd_cap +
11783 PCI_VPD_ADDR, &tmp16);
11784 if (tmp16 & 0x8000)
11785 break;
11786 msleep(1);
11787 }
David S. Millerf49639e2006-06-09 11:58:36 -070011788 if (!(tmp16 & 0x8000))
11789 goto out_not_found;
11790
Michael Chan1b277772006-03-20 22:27:48 -080011791 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11792 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011793 v = cpu_to_le32(tmp);
11794 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011795 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011796 }
11797
11798 /* Now parse and find the part number. */
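	/* The buffer holds standard PCI VPD resources: the 0x82 identifier
	 * string and any 0x91 read/write block are skipped, and only the
	 * 0x90 read-only block is scanned for the two-character "PN"
	 * keyword, whose length byte bounds the copy below.
	 */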
Michael Chanaf2c6a42006-11-07 14:57:51 -080011799 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011800 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011801 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011802
11803 if (val == 0x82 || val == 0x91) {
11804 i = (i + 3 +
11805 (vpd_data[i + 1] +
11806 (vpd_data[i + 2] << 8)));
11807 continue;
11808 }
11809
11810 if (val != 0x90)
11811 goto out_not_found;
11812
11813 block_end = (i + 3 +
11814 (vpd_data[i + 1] +
11815 (vpd_data[i + 2] << 8)));
11816 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011817
11818 if (block_end > 256)
11819 goto out_not_found;
11820
11821 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011822 if (vpd_data[i + 0] == 'P' &&
11823 vpd_data[i + 1] == 'N') {
11824 int partno_len = vpd_data[i + 2];
11825
Michael Chanaf2c6a42006-11-07 14:57:51 -080011826 i += 3;
11827 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011828 goto out_not_found;
11829
11830 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011831 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011832
11833 /* Success. */
11834 return;
11835 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011836 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011837 }
11838
11839 /* Part number not found. */
11840 goto out_not_found;
11841 }
11842
11843out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11845 strcpy(tp->board_part_number, "BCM95906");
11846 else
11847 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011848}
11849
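/* A firmware image header is treated as valid only if the first word, masked
 * with 0xfc000000, reads 0x0c000000 and the following word reads zero.
 */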
Matt Carlson9c8a6202007-10-21 16:16:08 -070011850static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11851{
11852 u32 val;
11853
11854 if (tg3_nvram_read_swab(tp, offset, &val) ||
11855 (val & 0xfc000000) != 0x0c000000 ||
11856 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11857 val != 0)
11858 return 0;
11859
11860 return 1;
11861}
11862
Michael Chanc4e65752006-03-20 22:29:32 -080011863static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11864{
11865 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011866 u32 ver_offset;
11867 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011868
11869 if (tg3_nvram_read_swab(tp, 0, &val))
11870 return;
11871
11872 if (val != TG3_EEPROM_MAGIC)
11873 return;
11874
11875 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11876 tg3_nvram_read_swab(tp, 0x4, &start))
11877 return;
11878
11879 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011880
11881 if (!tg3_fw_img_is_valid(tp, offset) ||
11882 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011883 return;
11884
Matt Carlson9c8a6202007-10-21 16:16:08 -070011885 offset = offset + ver_offset - start;
11886 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011887 __le32 v;
11888 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011889 return;
11890
Al Virob9fc7dc2007-12-17 22:59:57 -080011891 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011892 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011893
11894 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011895 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011896 return;
11897
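	/* Walk the NVRAM directory for an ASF init-code entry; if one is
	 * found, its version bytes are appended to tp->fw_ver below.
	 */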
11898 for (offset = TG3_NVM_DIR_START;
11899 offset < TG3_NVM_DIR_END;
11900 offset += TG3_NVM_DIRENT_SIZE) {
11901 if (tg3_nvram_read_swab(tp, offset, &val))
11902 return;
11903
11904 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11905 break;
11906 }
11907
11908 if (offset == TG3_NVM_DIR_END)
11909 return;
11910
11911 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11912 start = 0x08000000;
11913 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11914 return;
11915
11916 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11917 !tg3_fw_img_is_valid(tp, offset) ||
11918 tg3_nvram_read_swab(tp, offset + 8, &val))
11919 return;
11920
11921 offset += val - start;
11922
11923 bcnt = strlen(tp->fw_ver);
11924
11925 tp->fw_ver[bcnt++] = ',';
11926 tp->fw_ver[bcnt++] = ' ';
11927
11928 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011929 __le32 v;
11930 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011931 return;
11932
Al Virob9fc7dc2007-12-17 22:59:57 -080011933 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011934
Al Virob9fc7dc2007-12-17 22:59:57 -080011935 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11936 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011937 break;
11938 }
11939
Al Virob9fc7dc2007-12-17 22:59:57 -080011940 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11941 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011942 }
11943
11944 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011945}
11946
Michael Chan7544b092007-05-05 13:08:32 -070011947static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11948
Linus Torvalds1da177e2005-04-16 15:20:36 -070011949static int __devinit tg3_get_invariants(struct tg3 *tp)
11950{
11951 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011952 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11953 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011954 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11955 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011956 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11957 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011958 { },
11959 };
11960 u32 misc_ctrl_reg;
11961 u32 cacheline_sz_reg;
11962 u32 pci_state_reg, grc_misc_cfg;
11963 u32 val;
11964 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011965 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011966
Linus Torvalds1da177e2005-04-16 15:20:36 -070011967 /* Force memory write invalidate off. If we leave it on,
11968 * then on 5700_BX chips we have to enable a workaround.
11969 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 11970	 * to match the cacheline size. The Broadcom driver has this
 11971	 * workaround but turns MWI off all the time, so it never uses
11972 * it. This seems to suggest that the workaround is insufficient.
11973 */
11974 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11975 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11976 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11977
11978 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11979 * has the register indirect write enable bit set before
11980 * we try to access any of the MMIO registers. It is also
11981 * critical that the PCI-X hw workaround situation is decided
11982 * before that as well.
11983 */
11984 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11985 &misc_ctrl_reg);
11986
11987 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11988 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11990 u32 prod_id_asic_rev;
11991
11992 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11993 &prod_id_asic_rev);
11994 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011996
Michael Chanff645be2005-04-21 17:09:53 -070011997 /* Wrong chip ID in 5752 A0. This code can be removed later
11998 * as A0 is not in production.
11999 */
12000 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12001 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12002
Michael Chan68929142005-08-09 20:17:14 -070012003 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12004 * we need to disable memory and use config. cycles
12005 * only to access all registers. The 5702/03 chips
12006 * can mistakenly decode the special cycles from the
12007 * ICH chipsets as memory write cycles, causing corruption
12008 * of register and memory space. Only certain ICH bridges
12009 * will drive special cycles with non-zero data during the
12010 * address phase which can fall within the 5703's address
12011 * range. This is not an ICH bug as the PCI spec allows
12012 * non-zero address during special cycles. However, only
12013 * these ICH bridges are known to drive non-zero addresses
12014 * during special cycles.
12015 *
12016 * Since special cycles do not cross PCI bridges, we only
12017 * enable this workaround if the 5703 is on the secondary
12018 * bus of these ICH bridges.
12019 */
12020 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12021 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12022 static struct tg3_dev_id {
12023 u32 vendor;
12024 u32 device;
12025 u32 rev;
12026 } ich_chipsets[] = {
12027 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12028 PCI_ANY_ID },
12029 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12030 PCI_ANY_ID },
12031 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12032 0xa },
12033 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12034 PCI_ANY_ID },
12035 { },
12036 };
12037 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12038 struct pci_dev *bridge = NULL;
12039
12040 while (pci_id->vendor != 0) {
12041 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12042 bridge);
12043 if (!bridge) {
12044 pci_id++;
12045 continue;
12046 }
12047 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070012048 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070012049 continue;
12050 }
12051 if (bridge->subordinate &&
12052 (bridge->subordinate->number ==
12053 tp->pdev->bus->number)) {
12054
12055 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12056 pci_dev_put(bridge);
12057 break;
12058 }
12059 }
12060 }
12061
Matt Carlson41588ba2008-04-19 18:12:33 -070012062 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12063 static struct tg3_dev_id {
12064 u32 vendor;
12065 u32 device;
12066 } bridge_chipsets[] = {
12067 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12068 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12069 { },
12070 };
12071 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12072 struct pci_dev *bridge = NULL;
12073
12074 while (pci_id->vendor != 0) {
12075 bridge = pci_get_device(pci_id->vendor,
12076 pci_id->device,
12077 bridge);
12078 if (!bridge) {
12079 pci_id++;
12080 continue;
12081 }
12082 if (bridge->subordinate &&
12083 (bridge->subordinate->number <=
12084 tp->pdev->bus->number) &&
12085 (bridge->subordinate->subordinate >=
12086 tp->pdev->bus->number)) {
12087 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12088 pci_dev_put(bridge);
12089 break;
12090 }
12091 }
12092 }
12093
Michael Chan4a29cc22006-03-19 13:21:12 -080012094 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
 12095	 * DMA addresses > 40-bit. This bridge may have additional
12096 * 57xx devices behind it in some 4-port NIC designs for example.
12097 * Any tg3 device found behind the bridge will also need the 40-bit
12098 * DMA workaround.
12099 */
Michael Chana4e2b342005-10-26 15:46:52 -070012100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12102 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012103 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012104 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012105 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012106 else {
12107 struct pci_dev *bridge = NULL;
12108
12109 do {
12110 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12111 PCI_DEVICE_ID_SERVERWORKS_EPB,
12112 bridge);
12113 if (bridge && bridge->subordinate &&
12114 (bridge->subordinate->number <=
12115 tp->pdev->bus->number) &&
12116 (bridge->subordinate->subordinate >=
12117 tp->pdev->bus->number)) {
12118 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12119 pci_dev_put(bridge);
12120 break;
12121 }
12122 } while (bridge);
12123 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012124
Linus Torvalds1da177e2005-04-16 15:20:36 -070012125 /* Initialize misc host control in PCI block. */
12126 tp->misc_host_ctrl |= (misc_ctrl_reg &
12127 MISC_HOST_CTRL_CHIPREV);
12128 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12129 tp->misc_host_ctrl);
12130
12131 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12132 &cacheline_sz_reg);
12133
12134 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12135 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12136 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12137 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12138
Michael Chan7544b092007-05-05 13:08:32 -070012139 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12140 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12141 tp->pdev_peer = tg3_find_peer(tp);
12142
John W. Linville2052da92005-04-21 16:56:08 -070012143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012151 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012152 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12153
John W. Linville1b440c562005-04-21 17:03:18 -070012154 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12155 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12156 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12157
Michael Chan5a6f3072006-03-20 22:28:05 -080012158 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012159 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12160 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12161 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12162 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12163 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12164 tp->pdev_peer == tp->pdev))
12165 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12166
Michael Chanaf36e6b2006-03-23 01:28:06 -080012167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012173 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012174 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012175 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012176 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012177 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12178 ASIC_REV_5750 &&
12179 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012180 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012181 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012182 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012183
Matt Carlsonf51f3562008-05-25 23:45:08 -070012184 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12185 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012186 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12187
Michael Chanc7835a72006-11-15 21:14:42 -080012188 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12189 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012190 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012191
12192 pcie_set_readrq(tp->pdev, 4096);
12193
Michael Chanc7835a72006-11-15 21:14:42 -080012194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12195 u16 lnkctl;
12196
12197 pci_read_config_word(tp->pdev,
12198 pcie_cap + PCI_EXP_LNKCTL,
12199 &lnkctl);
12200 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12201 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12202 }
Matt Carlsonfcb389d2008-11-03 16:55:44 -080012203 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12204 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012205
Michael Chan399de502005-10-03 14:02:39 -070012206 /* If we have an AMD 762 or VIA K8T800 chipset, write
12207 * reordering to the mailbox registers done by the host
12208 * controller can cause major troubles. We read back from
12209 * every mailbox register write to force the writes to be
12210 * posted to the chip in order.
12211 */
12212 if (pci_dev_present(write_reorder_chipsets) &&
12213 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12214 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12215
Linus Torvalds1da177e2005-04-16 15:20:36 -070012216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12217 tp->pci_lat_timer < 64) {
12218 tp->pci_lat_timer = 64;
12219
12220 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12221 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12222 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12223 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12224
12225 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12226 cacheline_sz_reg);
12227 }
12228
Matt Carlson9974a352007-10-07 23:27:28 -070012229 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12230 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12231 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12232 if (!tp->pcix_cap) {
12233 printk(KERN_ERR PFX "Cannot find PCI-X "
12234 "capability, aborting.\n");
12235 return -EIO;
12236 }
12237 }
12238
Linus Torvalds1da177e2005-04-16 15:20:36 -070012239 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12240 &pci_state_reg);
12241
Matt Carlson9974a352007-10-07 23:27:28 -070012242 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012243 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12244
12245 /* If this is a 5700 BX chipset, and we are in PCI-X
12246 * mode, enable register write workaround.
12247 *
12248 * The workaround is to use indirect register accesses
12249 * for all chip writes not to mailbox registers.
12250 */
12251 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12252 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012253
12254 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12255
 12256	 /* The chip can have its power management PCI config
12257 * space registers clobbered due to this bug.
12258 * So explicitly force the chip into D0 here.
12259 */
Matt Carlson9974a352007-10-07 23:27:28 -070012260 pci_read_config_dword(tp->pdev,
12261 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012262 &pm_reg);
12263 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12264 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012265 pci_write_config_dword(tp->pdev,
12266 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012267 pm_reg);
12268
12269 /* Also, force SERR#/PERR# in PCI command. */
12270 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12271 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12272 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12273 }
12274 }
12275
Michael Chan087fe252005-08-09 20:17:41 -070012276 /* 5700 BX chips need to have their TX producer index mailboxes
 12277	 * written twice to work around a bug.
12278 */
12279 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12280 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12281
Linus Torvalds1da177e2005-04-16 15:20:36 -070012282 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12283 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12284 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12285 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12286
12287 /* Chip-specific fixup from Broadcom driver */
12288 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12289 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12290 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12291 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12292 }
12293
Michael Chan1ee582d2005-08-09 20:16:46 -070012294 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012295 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012296 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012297 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012298 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012299 tp->write32_tx_mbox = tg3_write32;
12300 tp->write32_rx_mbox = tg3_write32;
12301
12302 /* Various workaround register access methods */
12303 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12304 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012305 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12306 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12307 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12308 /*
 12309	 * Back-to-back register writes can cause problems on these
 12310	 * chips; the workaround is to read back all reg writes
12311 * except those to mailbox regs.
12312 *
12313 * See tg3_write_indirect_reg32().
12314 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012315 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012316 }
12317
Michael Chan1ee582d2005-08-09 20:16:46 -070012318
12319 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12320 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12321 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12322 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12323 tp->write32_rx_mbox = tg3_write_flush_reg32;
12324 }
Michael Chan20094932005-08-09 20:16:32 -070012325
Michael Chan68929142005-08-09 20:17:14 -070012326 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12327 tp->read32 = tg3_read_indirect_reg32;
12328 tp->write32 = tg3_write_indirect_reg32;
12329 tp->read32_mbox = tg3_read_indirect_mbox;
12330 tp->write32_mbox = tg3_write_indirect_mbox;
12331 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12332 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12333
12334 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012335 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012336
12337 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12338 pci_cmd &= ~PCI_COMMAND_MEMORY;
12339 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12340 }
Michael Chanb5d37722006-09-27 16:06:21 -070012341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12342 tp->read32_mbox = tg3_read32_mbox_5906;
12343 tp->write32_mbox = tg3_write32_mbox_5906;
12344 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12345 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12346 }
Michael Chan68929142005-08-09 20:17:14 -070012347
Michael Chanbbadf502006-04-06 21:46:34 -070012348 if (tp->write32 == tg3_write_indirect_reg32 ||
12349 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12350 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012352 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12353
Michael Chan7d0c41e2005-04-21 17:06:20 -070012354 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012355 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012356 * determined before calling tg3_set_power_state() so that
12357 * we know whether or not to switch out of Vaux power.
12358 * When the flag is set, it means that GPIO1 is used for eeprom
12359 * write protect and also implies that it is a LOM where GPIOs
12360 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012361 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012362 tg3_get_eeprom_hw_cfg(tp);
12363
Matt Carlson0d3031d2007-10-10 18:02:43 -070012364 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12365 /* Allow reads and writes to the
12366 * APE register and memory space.
12367 */
12368 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12369 PCISTATE_ALLOW_APE_SHMEM_WR;
12370 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12371 pci_state_reg);
12372 }
12373
Matt Carlson9936bcf2007-10-10 18:03:07 -070012374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012377 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12378
Michael Chan314fba32005-04-21 17:07:04 -070012379 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12380 * GPIO1 driven high will bring 5700's external PHY out of reset.
12381 * It is also used as eeprom write protect on LOMs.
12382 */
12383 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12384 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12385 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12386 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12387 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012388 /* Unused GPIO3 must be driven as output on 5752 because there
12389 * are no pull-up resistors on unused GPIO pins.
12390 */
12391 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12392 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012393
Michael Chanaf36e6b2006-03-23 01:28:06 -080012394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12395 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12396
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012397 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12398 /* Turn off the debug UART. */
12399 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12400 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12401 /* Keep VMain power. */
12402 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12403 GRC_LCLCTRL_GPIO_OUTPUT0;
12404 }
12405
Linus Torvalds1da177e2005-04-16 15:20:36 -070012406 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012407 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012408 if (err) {
12409 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12410 pci_name(tp->pdev));
12411 return err;
12412 }
12413
12414 /* 5700 B0 chips do not support checksumming correctly due
12415 * to hardware bugs.
12416 */
12417 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12418 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12419
Linus Torvalds1da177e2005-04-16 15:20:36 -070012420 /* Derive initial jumbo mode from MTU assigned in
12421 * ether_setup() via the alloc_etherdev() call
12422 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012423 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012424 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012425 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012426
12427 /* Determine WakeOnLan speed to use. */
12428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12429 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12430 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12431 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12432 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12433 } else {
12434 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12435 }
12436
 12437	 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
12438 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12439 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12440 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012441 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012442 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012443 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012444 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12445
12446 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12447 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12448 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12449 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12450 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12451
Michael Chanc424cb22006-04-29 18:56:34 -070012452 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012457 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12458 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12459 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012460 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12461 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012462 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12463 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012464 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12465 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012466
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12468 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12469 tp->phy_otp = tg3_read_otp_phycfg(tp);
12470 if (tp->phy_otp == 0)
12471 tp->phy_otp = TG3_OTP_DEFAULT;
12472 }
12473
Matt Carlsonf51f3562008-05-25 23:45:08 -070012474 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012475 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12476 else
12477 tp->mi_mode = MAC_MI_MODE_BASE;
12478
Linus Torvalds1da177e2005-04-16 15:20:36 -070012479 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012480 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12481 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12482 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12483
Matt Carlson57e69832008-05-25 23:48:31 -070012484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12485 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12486
Matt Carlson158d7ab2008-05-29 01:37:54 -070012487 err = tg3_mdio_init(tp);
12488 if (err)
12489 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012490
12491 /* Initialize data/descriptor byte/word swapping. */
12492 val = tr32(GRC_MODE);
12493 val &= GRC_MODE_HOST_STACKUP;
12494 tw32(GRC_MODE, val | tp->grc_mode);
12495
12496 tg3_switch_clocks(tp);
12497
12498 /* Clear this out for sanity. */
12499 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12500
12501 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12502 &pci_state_reg);
12503 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12504 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12505 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12506
12507 if (chiprevid == CHIPREV_ID_5701_A0 ||
12508 chiprevid == CHIPREV_ID_5701_B0 ||
12509 chiprevid == CHIPREV_ID_5701_B2 ||
12510 chiprevid == CHIPREV_ID_5701_B5) {
12511 void __iomem *sram_base;
12512
12513 /* Write some dummy words into the SRAM status block
12514 * area, see if it reads back correctly. If the return
12515 * value is bad, force enable the PCIX workaround.
12516 */
12517 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12518
12519 writel(0x00000000, sram_base);
12520 writel(0x00000000, sram_base + 4);
12521 writel(0xffffffff, sram_base + 4);
12522 if (readl(sram_base) != 0x00000000)
12523 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12524 }
12525 }
12526
12527 udelay(50);
12528 tg3_nvram_init(tp);
12529
12530 grc_misc_cfg = tr32(GRC_MISC_CFG);
12531 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12532
Linus Torvalds1da177e2005-04-16 15:20:36 -070012533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12534 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12535 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12536 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12537
David S. Millerfac9b832005-05-18 22:46:34 -070012538 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12539 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12540 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12541 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12542 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12543 HOSTCC_MODE_CLRTICK_TXBD);
12544
12545 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12546 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12547 tp->misc_host_ctrl);
12548 }
12549
Matt Carlson3bda1252008-08-15 14:08:22 -070012550 /* Preserve the APE MAC_MODE bits */
12551 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12552 tp->mac_mode = tr32(MAC_MODE) |
12553 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12554 else
12555 tp->mac_mode = TG3_DEF_MAC_MODE;
12556
Linus Torvalds1da177e2005-04-16 15:20:36 -070012557 /* these are limited to 10/100 only */
12558 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12559 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12560 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12561 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12562 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12563 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12564 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12565 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12566 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012567 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12568 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012570 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12571
12572 err = tg3_phy_probe(tp);
12573 if (err) {
12574 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12575 pci_name(tp->pdev), err);
12576 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012577 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012578 }
12579
12580 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012581 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012582
12583 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12584 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12585 } else {
12586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12587 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12588 else
12589 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12590 }
12591
12592 /* 5700 {AX,BX} chips have a broken status block link
12593 * change bit implementation, so we must use the
12594 * status register in those cases.
12595 */
12596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12597 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12598 else
12599 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12600
12601	/* The led_ctrl is set during tg3_phy_probe; here we might
12602 * have to force the link status polling mechanism based
12603 * upon subsystem IDs.
12604 */
12605 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012607 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12608 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12609 TG3_FLAG_USE_LINKCHG_REG);
12610 }
12611
12612 /* For all SERDES we poll the MAC status register. */
12613 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12614 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12615 else
12616 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12617
Michael Chan5a6f3072006-03-20 22:28:05 -080012618 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070012619 * straddle the 4GB address boundary in some cases.
12620 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080012621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080012627 tp->dev->hard_start_xmit = tg3_start_xmit;
12628 else
12629 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
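	/* Note (added for clarity, not in the original source): tg3_start_xmit_dma_bug
	 * is the transmit path that carries the extra checks (and, where needed, the
	 * bounce-to-a-safe-buffer workaround) for DMA mappings that cross a 4GB
	 * boundary; the chips listed above do not need that and get the leaner
	 * tg3_start_xmit path.
	 */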
Linus Torvalds1da177e2005-04-16 15:20:36 -070012630
12631 tp->rx_offset = 2;
12632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12633 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12634 tp->rx_offset = 0;
12635
Michael Chanf92905d2006-06-29 20:14:29 -070012636 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12637
12638 /* Increment the rx prod index on the rx std ring by at most
12639	 * 8 for these chips to work around hw errata.
12640 */
12641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12644 tp->rx_std_max_post = 8;
12645
Matt Carlson8ed5d972007-05-07 00:25:49 -070012646 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12647 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12648 PCIE_PWR_MGMT_L1_THRESH_MSK;
12649
Linus Torvalds1da177e2005-04-16 15:20:36 -070012650 return err;
12651}
12652
David S. Miller49b6e95f2007-03-29 01:38:42 -070012653#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012654static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12655{
12656 struct net_device *dev = tp->dev;
12657 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012658 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012659 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012660 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012661
David S. Miller49b6e95f2007-03-29 01:38:42 -070012662 addr = of_get_property(dp, "local-mac-address", &len);
12663 if (addr && len == 6) {
12664 memcpy(dev->dev_addr, addr, 6);
12665 memcpy(dev->perm_addr, dev->dev_addr, 6);
12666 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012667 }
12668 return -ENODEV;
12669}
12670
12671static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12672{
12673 struct net_device *dev = tp->dev;
12674
12675 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012676 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012677 return 0;
12678}
12679#endif
12680
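/* Note (added for clarity, not in the original source): tg3_get_device_address()
 * tries the possible MAC address sources in order: the SPARC OF
 * "local-mac-address" property, the SRAM mailbox (valid when the high word
 * carries the 0x484b signature), NVRAM at mac_offset, and finally the
 * MAC_ADDR_0_HIGH/LOW registers, with the SPARC IDPROM as a last resort;
 * -EINVAL is returned only if the result is still not a valid Ethernet
 * address. As a made-up example, mailbox words hi = 0x484b0010 and
 * lo = 0x181a2b3c decode to the MAC address 00:10:18:1a:2b:3c (hi supplies
 * bytes 0-1, lo supplies bytes 2-5, most significant byte first).
 */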
12681static int __devinit tg3_get_device_address(struct tg3 *tp)
12682{
12683 struct net_device *dev = tp->dev;
12684 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012685 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012686
David S. Miller49b6e95f2007-03-29 01:38:42 -070012687#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012688 if (!tg3_get_macaddr_sparc(tp))
12689 return 0;
12690#endif
12691
12692 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012693 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012694 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012695 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12696 mac_offset = 0xcc;
12697 if (tg3_nvram_lock(tp))
12698 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12699 else
12700 tg3_nvram_unlock(tp);
12701 }
Michael Chanb5d37722006-09-27 16:06:21 -070012702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12703 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012704
12705 /* First try to get it from MAC address mailbox. */
12706 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12707 if ((hi >> 16) == 0x484b) {
12708 dev->dev_addr[0] = (hi >> 8) & 0xff;
12709 dev->dev_addr[1] = (hi >> 0) & 0xff;
12710
12711 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12712 dev->dev_addr[2] = (lo >> 24) & 0xff;
12713 dev->dev_addr[3] = (lo >> 16) & 0xff;
12714 dev->dev_addr[4] = (lo >> 8) & 0xff;
12715 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012716
Michael Chan008652b2006-03-27 23:14:53 -080012717 /* Some old bootcode may report a 0 MAC address in SRAM */
12718 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12719 }
12720 if (!addr_ok) {
12721 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012722 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012723 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12724 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12725 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12726 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12727 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12728 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12729 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12730 }
12731 /* Finally just fetch it out of the MAC control regs. */
12732 else {
12733 hi = tr32(MAC_ADDR_0_HIGH);
12734 lo = tr32(MAC_ADDR_0_LOW);
12735
12736 dev->dev_addr[5] = lo & 0xff;
12737 dev->dev_addr[4] = (lo >> 8) & 0xff;
12738 dev->dev_addr[3] = (lo >> 16) & 0xff;
12739 dev->dev_addr[2] = (lo >> 24) & 0xff;
12740 dev->dev_addr[1] = hi & 0xff;
12741 dev->dev_addr[0] = (hi >> 8) & 0xff;
12742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012743 }
12744
12745 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012746#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012747 if (!tg3_get_default_macaddr_sparc(tp))
12748 return 0;
12749#endif
12750 return -EINVAL;
12751 }
John W. Linville2ff43692005-09-12 14:44:20 -070012752 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012753 return 0;
12754}
12755
David S. Miller59e6b432005-05-18 22:50:10 -070012756#define BOUNDARY_SINGLE_CACHELINE 1
12757#define BOUNDARY_MULTI_CACHELINE 2
12758
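/* Note (added for clarity, not in the original source): tg3_calc_dma_bndry()
 * folds the host cache line size and a per-architecture goal into the DMA
 * read/write boundary fields of DMA_RWCTRL. Assuming a 64-byte cache line
 * and BOUNDARY_SINGLE_CACHELINE, the code below picks a 64-byte read/write
 * boundary on conventional PCI, a 64-byte write boundary on PCI Express
 * (there is no read-side control there), and 128-byte read/write boundaries
 * on PCI-X.
 */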
12759static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12760{
12761 int cacheline_size;
12762 u8 byte;
12763 int goal;
12764
12765 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12766 if (byte == 0)
12767 cacheline_size = 1024;
12768 else
12769 cacheline_size = (int) byte * 4;
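	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so a value of 0x10
	 * corresponds to a 64-byte cache line; an unprogrammed value of 0 is
	 * treated as 1024 bytes above.
	 */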
12770
12771 /* On 5703 and later chips, the boundary bits have no
12772 * effect.
12773 */
12774 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12775 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12776 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12777 goto out;
12778
12779#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12780 goal = BOUNDARY_MULTI_CACHELINE;
12781#else
12782#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12783 goal = BOUNDARY_SINGLE_CACHELINE;
12784#else
12785 goal = 0;
12786#endif
12787#endif
12788
12789 if (!goal)
12790 goto out;
12791
12792 /* PCI controllers on most RISC systems tend to disconnect
12793 * when a device tries to burst across a cache-line boundary.
12794 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12795 *
12796 * Unfortunately, for PCI-E there are only limited
12797 * write-side controls for this, and thus for reads
12798 * we will still get the disconnects. We'll also waste
12799 * these PCI cycles for both read and write for chips
12800 * other than 5700 and 5701 which do not implement the
12801 * boundary bits.
12802 */
12803 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12804 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12805 switch (cacheline_size) {
12806 case 16:
12807 case 32:
12808 case 64:
12809 case 128:
12810 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12811 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12812 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12813 } else {
12814 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12815 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12816 }
12817 break;
12818
12819 case 256:
12820 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12821 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12822 break;
12823
12824 default:
12825 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12826 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12827 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012828 }
David S. Miller59e6b432005-05-18 22:50:10 -070012829 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12830 switch (cacheline_size) {
12831 case 16:
12832 case 32:
12833 case 64:
12834 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12835 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12836 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12837 break;
12838 }
12839 /* fallthrough */
12840 case 128:
12841 default:
12842 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12843 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12844 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012845 }
David S. Miller59e6b432005-05-18 22:50:10 -070012846 } else {
12847 switch (cacheline_size) {
12848 case 16:
12849 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12850 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12851 DMA_RWCTRL_WRITE_BNDRY_16);
12852 break;
12853 }
12854 /* fallthrough */
12855 case 32:
12856 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12857 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12858 DMA_RWCTRL_WRITE_BNDRY_32);
12859 break;
12860 }
12861 /* fallthrough */
12862 case 64:
12863 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12864 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12865 DMA_RWCTRL_WRITE_BNDRY_64);
12866 break;
12867 }
12868 /* fallthrough */
12869 case 128:
12870 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12871 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12872 DMA_RWCTRL_WRITE_BNDRY_128);
12873 break;
12874 }
12875 /* fallthrough */
12876 case 256:
12877 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12878 DMA_RWCTRL_WRITE_BNDRY_256);
12879 break;
12880 case 512:
12881 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12882 DMA_RWCTRL_WRITE_BNDRY_512);
12883 break;
12884 case 1024:
12885 default:
12886 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12887 DMA_RWCTRL_WRITE_BNDRY_1024);
12888 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012889 }
David S. Miller59e6b432005-05-18 22:50:10 -070012890 }
12891
12892out:
12893 return val;
12894}
12895
Linus Torvalds1da177e2005-04-16 15:20:36 -070012896static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12897{
12898 struct tg3_internal_buffer_desc test_desc;
12899 u32 sram_dma_descs;
12900 int i, ret;
12901
12902 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12903
12904 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12905 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12906 tw32(RDMAC_STATUS, 0);
12907 tw32(WDMAC_STATUS, 0);
12908
12909 tw32(BUFMGR_MODE, 0);
12910 tw32(FTQ_RESET, 0);
12911
12912 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12913 test_desc.addr_lo = buf_dma & 0xffffffff;
12914 test_desc.nic_mbuf = 0x00002100;
12915 test_desc.len = size;
12916
12917 /*
12918	 * HP ZX1 was seeing test failures with 5701 cards running at 33MHz
12919	 * the *second* time the tg3 driver was loaded after an
12920	 * initial scan.
12921 *
12922 * Broadcom tells me:
12923 * ...the DMA engine is connected to the GRC block and a DMA
12924 * reset may affect the GRC block in some unpredictable way...
12925 * The behavior of resets to individual blocks has not been tested.
12926 *
12927 * Broadcom noted the GRC reset will also reset all sub-components.
12928 */
12929 if (to_device) {
12930 test_desc.cqid_sqid = (13 << 8) | 2;
12931
12932 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12933 udelay(40);
12934 } else {
12935 test_desc.cqid_sqid = (16 << 8) | 7;
12936
12937 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12938 udelay(40);
12939 }
12940 test_desc.flags = 0x00000005;
12941
12942 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12943 u32 val;
12944
12945 val = *(((u32 *)&test_desc) + i);
12946 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12947 sram_dma_descs + (i * sizeof(u32)));
12948 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12949 }
12950 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12951
12952 if (to_device) {
12953 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12954 } else {
12955 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12956 }
12957
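	/* Poll the matching completion FIFO for up to ~4 ms (40 iterations x
	 * 100 us) for the descriptor index to come back; otherwise fail with
	 * -ENODEV.
	 */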
12958 ret = -ENODEV;
12959 for (i = 0; i < 40; i++) {
12960 u32 val;
12961
12962 if (to_device)
12963 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12964 else
12965 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12966 if ((val & 0xffff) == sram_dma_descs) {
12967 ret = 0;
12968 break;
12969 }
12970
12971 udelay(100);
12972 }
12973
12974 return ret;
12975}
12976
David S. Millerded73402005-05-23 13:59:47 -070012977#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012978
12979static int __devinit tg3_test_dma(struct tg3 *tp)
12980{
12981 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012982 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012983 int ret;
12984
12985 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12986 if (!buf) {
12987 ret = -ENOMEM;
12988 goto out_nofree;
12989 }
12990
12991 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12992 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12993
David S. Miller59e6b432005-05-18 22:50:10 -070012994 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012995
12996 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12997 /* DMA read watermark not used on PCIE */
12998 tp->dma_rwctrl |= 0x00180000;
12999 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070013000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013002 tp->dma_rwctrl |= 0x003f0000;
13003 else
13004 tp->dma_rwctrl |= 0x003f000f;
13005 } else {
13006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13008 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080013009 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013010
Michael Chan4a29cc22006-03-19 13:21:12 -080013011 /* If the 5704 is behind the EPB bridge, we can
13012 * do the less restrictive ONE_DMA workaround for
13013 * better performance.
13014 */
13015 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13017 tp->dma_rwctrl |= 0x8000;
13018 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013019 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13020
Michael Chan49afdeb2007-02-13 12:17:03 -080013021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13022 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070013023 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080013024 tp->dma_rwctrl |=
13025 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13026 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13027 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070013028 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13029 /* 5780 always in PCIX mode */
13030 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070013031 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13032 /* 5714 always in PCIX mode */
13033 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013034 } else {
13035 tp->dma_rwctrl |= 0x001b000f;
13036 }
13037 }
13038
13039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13041 tp->dma_rwctrl &= 0xfffffff0;
13042
13043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13045 /* Remove this if it causes problems for some boards. */
13046 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13047
13048 /* On 5700/5701 chips, we need to set this bit.
13049 * Otherwise the chip will issue cacheline transactions
13050	 * to streamable DMA memory without all of the byte
13051 * enables turned on. This is an error on several
13052 * RISC PCI controllers, in particular sparc64.
13053 *
13054 * On 5703/5704 chips, this bit has been reassigned
13055 * a different meaning. In particular, it is used
13056 * on those chips to enable a PCI-X workaround.
13057 */
13058 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13059 }
13060
13061 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13062
13063#if 0
13064 /* Unneeded, already done by tg3_get_invariants. */
13065 tg3_switch_clocks(tp);
13066#endif
13067
13068 ret = 0;
13069 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13071 goto out;
13072
David S. Miller59e6b432005-05-18 22:50:10 -070013073	/* It is best to perform the DMA test with the maximum write burst size
13074 * to expose the 5700/5701 write DMA bug.
13075 */
13076 saved_dma_rwctrl = tp->dma_rwctrl;
13077 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13078 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13079
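	/* Note (added for clarity, not in the original source): the loop below
	 * fills the 8 KB test buffer with a counting pattern, DMAs it to the
	 * chip and back, and verifies the result. On a mismatch it retries with
	 * the write boundary forced down to 16 bytes; only if that also fails
	 * does it return -ENODEV.
	 */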
Linus Torvalds1da177e2005-04-16 15:20:36 -070013080 while (1) {
13081 u32 *p = buf, i;
13082
13083 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13084 p[i] = i;
13085
13086 /* Send the buffer to the chip. */
13087 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13088 if (ret) {
13089 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13090 break;
13091 }
13092
13093#if 0
13094 /* validate data reached card RAM correctly. */
13095 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13096 u32 val;
13097 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13098 if (le32_to_cpu(val) != p[i]) {
13099 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13100 /* ret = -ENODEV here? */
13101 }
13102 p[i] = 0;
13103 }
13104#endif
13105 /* Now read it back. */
13106 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13107 if (ret) {
13108 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13109
13110 break;
13111 }
13112
13113 /* Verify it. */
13114 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13115 if (p[i] == i)
13116 continue;
13117
David S. Miller59e6b432005-05-18 22:50:10 -070013118 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13119 DMA_RWCTRL_WRITE_BNDRY_16) {
13120 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013121 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13122 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13123 break;
13124 } else {
13125 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13126 ret = -ENODEV;
13127 goto out;
13128 }
13129 }
13130
13131 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13132 /* Success. */
13133 ret = 0;
13134 break;
13135 }
13136 }
David S. Miller59e6b432005-05-18 22:50:10 -070013137 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13138 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013139 static struct pci_device_id dma_wait_state_chipsets[] = {
13140 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13141 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13142 { },
13143 };
13144
David S. Miller59e6b432005-05-18 22:50:10 -070013145		/* The DMA test passed without adjusting the DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013146 * now look for chipsets that are known to expose the
13147 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013148 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013149 if (pci_dev_present(dma_wait_state_chipsets)) {
13150 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13151 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13152		} else
13154 /* Safe to use the calculated DMA boundary. */
13155 tp->dma_rwctrl = saved_dma_rwctrl;
13156
David S. Miller59e6b432005-05-18 22:50:10 -070013157 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013159
13160out:
13161 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13162out_nofree:
13163 return ret;
13164}
13165
13166static void __devinit tg3_init_link_config(struct tg3 *tp)
13167{
13168 tp->link_config.advertising =
13169 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13170 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13171 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13172 ADVERTISED_Autoneg | ADVERTISED_MII);
13173 tp->link_config.speed = SPEED_INVALID;
13174 tp->link_config.duplex = DUPLEX_INVALID;
13175 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013176 tp->link_config.active_speed = SPEED_INVALID;
13177 tp->link_config.active_duplex = DUPLEX_INVALID;
13178 tp->link_config.phy_is_low_power = 0;
13179 tp->link_config.orig_speed = SPEED_INVALID;
13180 tp->link_config.orig_duplex = DUPLEX_INVALID;
13181 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13182}
13183
13184static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13185{
Michael Chanfdfec172005-07-25 12:31:48 -070013186 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13187 tp->bufmgr_config.mbuf_read_dma_low_water =
13188 DEFAULT_MB_RDMA_LOW_WATER_5705;
13189 tp->bufmgr_config.mbuf_mac_rx_low_water =
13190 DEFAULT_MB_MACRX_LOW_WATER_5705;
13191 tp->bufmgr_config.mbuf_high_water =
13192 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13194 tp->bufmgr_config.mbuf_mac_rx_low_water =
13195 DEFAULT_MB_MACRX_LOW_WATER_5906;
13196 tp->bufmgr_config.mbuf_high_water =
13197 DEFAULT_MB_HIGH_WATER_5906;
13198 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013199
Michael Chanfdfec172005-07-25 12:31:48 -070013200 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13201 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13202 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13203 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13204 tp->bufmgr_config.mbuf_high_water_jumbo =
13205 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13206 } else {
13207 tp->bufmgr_config.mbuf_read_dma_low_water =
13208 DEFAULT_MB_RDMA_LOW_WATER;
13209 tp->bufmgr_config.mbuf_mac_rx_low_water =
13210 DEFAULT_MB_MACRX_LOW_WATER;
13211 tp->bufmgr_config.mbuf_high_water =
13212 DEFAULT_MB_HIGH_WATER;
13213
13214 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13215 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13216 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13217 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13218 tp->bufmgr_config.mbuf_high_water_jumbo =
13219 DEFAULT_MB_HIGH_WATER_JUMBO;
13220 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013221
13222 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13223 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13224}
13225
13226static char * __devinit tg3_phy_string(struct tg3 *tp)
13227{
13228 switch (tp->phy_id & PHY_ID_MASK) {
13229 case PHY_ID_BCM5400: return "5400";
13230 case PHY_ID_BCM5401: return "5401";
13231 case PHY_ID_BCM5411: return "5411";
13232 case PHY_ID_BCM5701: return "5701";
13233 case PHY_ID_BCM5703: return "5703";
13234 case PHY_ID_BCM5704: return "5704";
13235 case PHY_ID_BCM5705: return "5705";
13236 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013237 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013238 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013239 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013240 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013241 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013242 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013243 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013244 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013245 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013246 case PHY_ID_BCM8002: return "8002/serdes";
13247 case 0: return "serdes";
13248 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013249 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013250}
13251
Michael Chanf9804dd2005-09-27 12:13:10 -070013252static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13253{
13254 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13255 strcpy(str, "PCI Express");
13256 return str;
13257 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13258 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13259
13260 strcpy(str, "PCIX:");
13261
13262 if ((clock_ctrl == 7) ||
13263 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13264 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13265 strcat(str, "133MHz");
13266 else if (clock_ctrl == 0)
13267 strcat(str, "33MHz");
13268 else if (clock_ctrl == 2)
13269 strcat(str, "50MHz");
13270 else if (clock_ctrl == 4)
13271 strcat(str, "66MHz");
13272 else if (clock_ctrl == 6)
13273 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013274 } else {
13275 strcpy(str, "PCI:");
13276 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13277 strcat(str, "66MHz");
13278 else
13279 strcat(str, "33MHz");
13280 }
13281 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13282 strcat(str, ":32-bit");
13283 else
13284 strcat(str, ":64-bit");
13285 return str;
13286}
13287
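/* Note (added for clarity, not in the original source): on dual-port parts
 * such as the 5704 the two MACs are separate PCI functions of one device, so
 * masking off the function bits (devfn & ~7) and probing the other functions
 * in the slot finds the partner. As a made-up example, a port at devfn 0x21
 * (device 4, function 1) would scan devfn 0x20-0x27 and pick up function 0
 * as its peer.
 */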
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013288static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013289{
13290 struct pci_dev *peer;
13291 unsigned int func, devnr = tp->pdev->devfn & ~7;
13292
13293 for (func = 0; func < 8; func++) {
13294 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13295 if (peer && peer != tp->pdev)
13296 break;
13297 pci_dev_put(peer);
13298 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013299	/* 5704 can be configured in single-port mode; set peer to
13300 * tp->pdev in that case.
13301 */
13302 if (!peer) {
13303 peer = tp->pdev;
13304 return peer;
13305 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013306
13307 /*
13308 * We don't need to keep the refcount elevated; there's no way
13309 * to remove one half of this device without removing the other
13310 */
13311 pci_dev_put(peer);
13312
13313 return peer;
13314}
13315
David S. Miller15f98502005-05-18 22:49:26 -070013316static void __devinit tg3_init_coal(struct tg3 *tp)
13317{
13318 struct ethtool_coalesce *ec = &tp->coal;
13319
13320 memset(ec, 0, sizeof(*ec));
13321 ec->cmd = ETHTOOL_GCOALESCE;
13322 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13323 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13324 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13325 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13326 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13327 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13328 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13329 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13330 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13331
13332 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13333 HOSTCC_MODE_CLRTICK_TXBD)) {
13334 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13335 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13336 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13337 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13338 }
Michael Chand244c892005-07-05 14:42:33 -070013339
13340 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13341 ec->rx_coalesce_usecs_irq = 0;
13342 ec->tx_coalesce_usecs_irq = 0;
13343 ec->stats_block_coalesce_usecs = 0;
13344 }
David S. Miller15f98502005-05-18 22:49:26 -070013345}
13346
Linus Torvalds1da177e2005-04-16 15:20:36 -070013347static int __devinit tg3_init_one(struct pci_dev *pdev,
13348 const struct pci_device_id *ent)
13349{
13350 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013351 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013352 struct net_device *dev;
13353 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013354 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013355 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013356 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013357
13358 if (tg3_version_printed++ == 0)
13359 printk(KERN_INFO "%s", version);
13360
13361 err = pci_enable_device(pdev);
13362 if (err) {
13363 printk(KERN_ERR PFX "Cannot enable PCI device, "
13364 "aborting.\n");
13365 return err;
13366 }
13367
Matt Carlson63532392008-11-03 16:49:57 -080013368 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013369 printk(KERN_ERR PFX "Cannot find proper PCI device "
13370 "base address, aborting.\n");
13371 err = -ENODEV;
13372 goto err_out_disable_pdev;
13373 }
13374
13375 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13376 if (err) {
13377 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13378 "aborting.\n");
13379 goto err_out_disable_pdev;
13380 }
13381
13382 pci_set_master(pdev);
13383
13384 /* Find power-management capability. */
13385 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13386 if (pm_cap == 0) {
13387 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13388 "aborting.\n");
13389 err = -EIO;
13390 goto err_out_free_res;
13391 }
13392
Linus Torvalds1da177e2005-04-16 15:20:36 -070013393 dev = alloc_etherdev(sizeof(*tp));
13394 if (!dev) {
13395 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13396 err = -ENOMEM;
13397 goto err_out_free_res;
13398 }
13399
Linus Torvalds1da177e2005-04-16 15:20:36 -070013400 SET_NETDEV_DEV(dev, &pdev->dev);
13401
Linus Torvalds1da177e2005-04-16 15:20:36 -070013402#if TG3_VLAN_TAG_USED
13403 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13404 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013405#endif
13406
13407 tp = netdev_priv(dev);
13408 tp->pdev = pdev;
13409 tp->dev = dev;
13410 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013411 tp->rx_mode = TG3_DEF_RX_MODE;
13412 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013413
Linus Torvalds1da177e2005-04-16 15:20:36 -070013414 if (tg3_debug > 0)
13415 tp->msg_enable = tg3_debug;
13416 else
13417 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13418
13419 /* The word/byte swap controls here control register access byte
13420 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13421 * setting below.
13422 */
13423 tp->misc_host_ctrl =
13424 MISC_HOST_CTRL_MASK_PCI_INT |
13425 MISC_HOST_CTRL_WORD_SWAP |
13426 MISC_HOST_CTRL_INDIR_ACCESS |
13427 MISC_HOST_CTRL_PCISTATE_RW;
13428
13429 /* The NONFRM (non-frame) byte/word swap controls take effect
13430 * on descriptor entries, anything which isn't packet data.
13431 *
13432 * The StrongARM chips on the board (one for tx, one for rx)
13433 * are running in big-endian mode.
13434 */
13435 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13436 GRC_MODE_WSWAP_NONFRM_DATA);
13437#ifdef __BIG_ENDIAN
13438 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13439#endif
13440 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013441 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013442 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013443
Matt Carlson63532392008-11-03 16:49:57 -080013444 dev->mem_start = pci_resource_start(pdev, BAR_0);
13445 tg3reg_len = pci_resource_len(pdev, BAR_0);
13446 dev->mem_end = dev->mem_start + tg3reg_len;
13447
13448 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013449 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013450 printk(KERN_ERR PFX "Cannot map device registers, "
13451 "aborting.\n");
13452 err = -ENOMEM;
13453 goto err_out_free_dev;
13454 }
13455
13456 tg3_init_link_config(tp);
13457
Linus Torvalds1da177e2005-04-16 15:20:36 -070013458 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13459 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13460 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13461
13462 dev->open = tg3_open;
13463 dev->stop = tg3_close;
13464 dev->get_stats = tg3_get_stats;
13465 dev->set_multicast_list = tg3_set_rx_mode;
13466 dev->set_mac_address = tg3_set_mac_addr;
13467 dev->do_ioctl = tg3_ioctl;
13468 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013469 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013470 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013471 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13472 dev->change_mtu = tg3_change_mtu;
13473 dev->irq = pdev->irq;
13474#ifdef CONFIG_NET_POLL_CONTROLLER
13475 dev->poll_controller = tg3_poll_controller;
13476#endif
13477
13478 err = tg3_get_invariants(tp);
13479 if (err) {
13480 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13481 "aborting.\n");
13482 goto err_out_iounmap;
13483 }
13484
Michael Chan4a29cc22006-03-19 13:21:12 -080013485 /* The EPB bridge inside 5714, 5715, and 5780 and any
13486 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013487 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13488 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13489 * do DMA address check in tg3_start_xmit().
13490 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013491 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13492 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13493 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013494 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13495#ifdef CONFIG_HIGHMEM
13496 dma_mask = DMA_64BIT_MASK;
13497#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013498 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013499 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13500
13501 /* Configure DMA attributes. */
13502 if (dma_mask > DMA_32BIT_MASK) {
13503 err = pci_set_dma_mask(pdev, dma_mask);
13504 if (!err) {
13505 dev->features |= NETIF_F_HIGHDMA;
13506 err = pci_set_consistent_dma_mask(pdev,
13507 persist_dma_mask);
13508 if (err < 0) {
13509 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13510 "DMA for consistent allocations\n");
13511 goto err_out_iounmap;
13512 }
13513 }
13514 }
13515 if (err || dma_mask == DMA_32BIT_MASK) {
13516 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13517 if (err) {
13518 printk(KERN_ERR PFX "No usable DMA configuration, "
13519 "aborting.\n");
13520 goto err_out_iounmap;
13521 }
13522 }
13523
Michael Chanfdfec172005-07-25 12:31:48 -070013524 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013525
Linus Torvalds1da177e2005-04-16 15:20:36 -070013526 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13527 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13528	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13531 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013533 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13534 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13535 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013536 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013537 }
13538
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013539 /* TSO is on by default on chips that support hardware TSO.
13540 * Firmware TSO on older chips gives lower performance, so it
13541 * is off by default, but can be enabled using ethtool.
13542 */
Michael Chanb0026622006-07-03 19:42:14 -070013543 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013544 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013545 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13546 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013547 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13549 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13550 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013552 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013553 }
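	/* Usage note (added for clarity, not in the original source): on
	 * firmware-TSO chips the feature is left disabled here and can be
	 * enabled at runtime with something like "ethtool -K ethX tso on";
	 * hardware-TSO chips advertise NETIF_F_TSO (plus TSO6/TSO_ECN where
	 * supported) by default as set above.
	 */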
Linus Torvalds1da177e2005-04-16 15:20:36 -070013554
Linus Torvalds1da177e2005-04-16 15:20:36 -070013555
13556 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13557 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13558 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13559 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13560 tp->rx_pending = 63;
13561 }
13562
Linus Torvalds1da177e2005-04-16 15:20:36 -070013563 err = tg3_get_device_address(tp);
13564 if (err) {
13565 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13566 "aborting.\n");
13567 goto err_out_iounmap;
13568 }
13569
Matt Carlson0d3031d2007-10-10 18:02:43 -070013570 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013571 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013572 printk(KERN_ERR PFX "Cannot find proper PCI device "
13573 "base address for APE, aborting.\n");
13574 err = -ENODEV;
13575 goto err_out_iounmap;
13576 }
13577
Matt Carlson63532392008-11-03 16:49:57 -080013578 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013579 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013580 printk(KERN_ERR PFX "Cannot map APE registers, "
13581 "aborting.\n");
13582 err = -ENOMEM;
13583 goto err_out_iounmap;
13584 }
13585
13586 tg3_ape_lock_init(tp);
13587 }
13588
Matt Carlsonc88864d2007-11-12 21:07:01 -080013589 /*
13590	 * Reset the chip in case a UNDI or EFI driver did not shut down
13591	 * cleanly; otherwise the DMA self test will enable the WDMAC and we'll
13592	 * see (spurious) pending DMA on the PCI bus at that point.
13593 */
13594 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13595 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13596 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13597 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13598 }
13599
13600 err = tg3_test_dma(tp);
13601 if (err) {
13602 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13603 goto err_out_apeunmap;
13604 }
13605
13606 /* Tigon3 can do ipv4 only... and some chips have buggy
13607 * checksumming.
13608 */
13609 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13610 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013616 dev->features |= NETIF_F_IPV6_CSUM;
13617
13618 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13619 } else
13620 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13621
13622	/* Flow control autonegotiation is the default behavior. */
13623 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013624 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013625
13626 tg3_init_coal(tp);
13627
Michael Chanc49a1562006-12-17 17:07:29 -080013628 pci_set_drvdata(pdev, dev);
13629
Linus Torvalds1da177e2005-04-16 15:20:36 -070013630 err = register_netdev(dev);
13631 if (err) {
13632 printk(KERN_ERR PFX "Cannot register net device, "
13633 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013634 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013635 }
13636
Matt Carlsondf59c942008-11-03 16:52:56 -080013637 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013638 dev->name,
13639 tp->board_part_number,
13640 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013641 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013642 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013643
Matt Carlsondf59c942008-11-03 16:52:56 -080013644 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13645 printk(KERN_INFO
13646 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13647 tp->dev->name,
13648 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13649 tp->mdio_bus->phy_map[PHY_ADDR]->dev.bus_id);
13650 else
13651 printk(KERN_INFO
13652 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13653 tp->dev->name, tg3_phy_string(tp),
13654 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13655 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13656 "10/100/1000Base-T")),
13657 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13658
13659 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013660 dev->name,
13661 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13662 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13663 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13664 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013665 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013666 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13667 dev->name, tp->dma_rwctrl,
13668 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13669 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013670
13671 return 0;
13672
Matt Carlson0d3031d2007-10-10 18:02:43 -070013673err_out_apeunmap:
13674 if (tp->aperegs) {
13675 iounmap(tp->aperegs);
13676 tp->aperegs = NULL;
13677 }
13678
Linus Torvalds1da177e2005-04-16 15:20:36 -070013679err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013680 if (tp->regs) {
13681 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013682 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013683 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013684
13685err_out_free_dev:
13686 free_netdev(dev);
13687
13688err_out_free_res:
13689 pci_release_regions(pdev);
13690
13691err_out_disable_pdev:
13692 pci_disable_device(pdev);
13693 pci_set_drvdata(pdev, NULL);
13694 return err;
13695}
13696
13697static void __devexit tg3_remove_one(struct pci_dev *pdev)
13698{
13699 struct net_device *dev = pci_get_drvdata(pdev);
13700
13701 if (dev) {
13702 struct tg3 *tp = netdev_priv(dev);
13703
Michael Chan7faa0062006-02-02 17:29:28 -080013704 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013705
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013706 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13707 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013708 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013709 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013710
Linus Torvalds1da177e2005-04-16 15:20:36 -070013711 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013712 if (tp->aperegs) {
13713 iounmap(tp->aperegs);
13714 tp->aperegs = NULL;
13715 }
Michael Chan68929142005-08-09 20:17:14 -070013716 if (tp->regs) {
13717 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013718 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013719 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013720 free_netdev(dev);
13721 pci_release_regions(pdev);
13722 pci_disable_device(pdev);
13723 pci_set_drvdata(pdev, NULL);
13724 }
13725}
13726
13727static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13728{
13729 struct net_device *dev = pci_get_drvdata(pdev);
13730 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013731 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013732 int err;
13733
Michael Chan3e0c95f2007-08-03 20:56:54 -070013734	/* PCI register 4 needs to be saved whether netif_running() is true or not.
13735 * MSI address and data need to be saved if using MSI and
13736 * netif_running().
13737 */
13738 pci_save_state(pdev);
13739
Linus Torvalds1da177e2005-04-16 15:20:36 -070013740 if (!netif_running(dev))
13741 return 0;
13742
Michael Chan7faa0062006-02-02 17:29:28 -080013743 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013744 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013745 tg3_netif_stop(tp);
13746
13747 del_timer_sync(&tp->timer);
13748
David S. Millerf47c11e2005-06-24 20:18:35 -070013749 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013750 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013751 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013752
13753 netif_device_detach(dev);
13754
David S. Millerf47c11e2005-06-24 20:18:35 -070013755 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013756 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013757 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013758 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013759
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013760 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13761
13762 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013763 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013764 int err2;
13765
David S. Millerf47c11e2005-06-24 20:18:35 -070013766 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013767
Michael Chan6a9eba12005-12-13 21:08:58 -080013768 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013769 err2 = tg3_restart_hw(tp, 1);
13770 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013771 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013772
13773 tp->timer.expires = jiffies + tp->timer_offset;
13774 add_timer(&tp->timer);
13775
13776 netif_device_attach(dev);
13777 tg3_netif_start(tp);
13778
Michael Chanb9ec6c12006-07-25 16:37:27 -070013779out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013780 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013781
13782 if (!err2)
13783 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013784 }
13785
13786 return err;
13787}
13788
13789static int tg3_resume(struct pci_dev *pdev)
13790{
13791 struct net_device *dev = pci_get_drvdata(pdev);
13792 struct tg3 *tp = netdev_priv(dev);
13793 int err;
13794
Michael Chan3e0c95f2007-08-03 20:56:54 -070013795 pci_restore_state(tp->pdev);
13796
Linus Torvalds1da177e2005-04-16 15:20:36 -070013797 if (!netif_running(dev))
13798 return 0;
13799
Michael Chanbc1c7562006-03-20 17:48:03 -080013800 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013801 if (err)
13802 return err;
13803
13804 netif_device_attach(dev);
13805
David S. Millerf47c11e2005-06-24 20:18:35 -070013806 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013807
Michael Chan6a9eba12005-12-13 21:08:58 -080013808 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013809 err = tg3_restart_hw(tp, 1);
13810 if (err)
13811 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013812
13813 tp->timer.expires = jiffies + tp->timer_offset;
13814 add_timer(&tp->timer);
13815
Linus Torvalds1da177e2005-04-16 15:20:36 -070013816 tg3_netif_start(tp);
13817
Michael Chanb9ec6c12006-07-25 16:37:27 -070013818out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013819 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013820
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013821 if (!err)
13822 tg3_phy_start(tp);
13823
Michael Chanb9ec6c12006-07-25 16:37:27 -070013824 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013825}
13826
13827static struct pci_driver tg3_driver = {
13828 .name = DRV_MODULE_NAME,
13829 .id_table = tg3_pci_tbl,
13830 .probe = tg3_init_one,
13831 .remove = __devexit_p(tg3_remove_one),
13832 .suspend = tg3_suspend,
13833 .resume = tg3_resume
13834};
13835
13836static int __init tg3_init(void)
13837{
Jeff Garzik29917622006-08-19 17:48:59 -040013838 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013839}
13840
13841static void __exit tg3_cleanup(void)
13842{
13843 pci_unregister_driver(&tg3_driver);
13844}
13845
13846module_init(tg3_init);
13847module_exit(tg3_cleanup);