/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.94"
#define DRV_MODULE_RELDATE	"August 14, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
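/* Illustrative note (added; not part of the original driver source):
 * because TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps the index
 * with the '& (foo - 1)' trick described in the comment above, e.g.
 * NEXT_TX(511) == (511 + 1) & 511 == 0, avoiding a hardware modulo.
 */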

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
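/* Usage sketch (added note, not in the original source): driver code goes
 * through these wrappers instead of calling readl()/writel() directly, e.g.
 *
 *	tw32(MAC_TX_LENGTHS, val);			plain write
 *	tw32_f(MAC_MODE, tp->mac_mode);			write + read-back flush
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);	write + flush + 40 usec
 *	val = tr32(TG3PCI_CLOCK_CTRL);			read
 *
 * so the indirect-register and write-reorder workarounds selected at probe
 * time are applied transparently through the tp->write32/tp->read32 hooks.
 */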

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
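/* Illustrative pairing (added note; assumed typical usage, not copied from
 * this file): callers bracket accesses to resources shared with the APE
 * management firmware, e.g.
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * tg3_ape_lock() polls the GRANT register for up to ~1 ms and returns
 * -EBUSY (after revoking its request) if the grant never arrives.
 */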

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		return i;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	switch (phydev->phy_id) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tg3_mdio_config(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
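/* Added summary note (not in the original source): the firmware event
 * handshake used here is: call tg3_wait_for_event_ack() so any previous
 * GRC_RX_CPU_DRIVER_EVENT has been consumed, write the command and data
 * into the NIC_SRAM_FW_CMD_* mailboxes with tg3_write_mem(), then call
 * tg3_generate_fw_event() to raise the event and record
 * tp->last_event_jiffies.  tg3_ump_link_report() below follows exactly
 * this sequence.
 */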

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}
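/* Worked example (added note, not in the original source): if the local
 * side advertises ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM and the
 * link partner advertises only LPA_1000XPAUSE_ASYM, the resolution above
 * returns TG3_FLOW_CTRL_RX, so tg3_setup_flow_control() below sets
 * RX_MODE_FLOW_CTRL_ENABLE and leaves TX_MODE_FLOW_CTRL_ENABLE clear.
 */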
1221
Matt Carlsonf51f3562008-05-25 23:45:08 -07001222static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
Matt Carlson95e28692008-05-25 23:44:14 -07001223{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001224 u8 autoneg;
Matt Carlsonf51f3562008-05-25 23:45:08 -07001225 u8 flowctrl = 0;
Matt Carlson95e28692008-05-25 23:44:14 -07001226 u32 old_rx_mode = tp->rx_mode;
1227 u32 old_tx_mode = tp->tx_mode;
1228
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001229 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001230 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001231 else
1232 autoneg = tp->link_config.autoneg;
1233
1234 if (autoneg == AUTONEG_ENABLE &&
Matt Carlson95e28692008-05-25 23:44:14 -07001235 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1236 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Matt Carlsonf51f3562008-05-25 23:45:08 -07001237 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
Matt Carlson95e28692008-05-25 23:44:14 -07001238 else
Matt Carlsonf51f3562008-05-25 23:45:08 -07001239 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1240 } else
1241 flowctrl = tp->link_config.flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001242
Matt Carlsonf51f3562008-05-25 23:45:08 -07001243 tp->link_config.active_flowctrl = flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001244
Matt Carlsonf51f3562008-05-25 23:45:08 -07001245 if (flowctrl & TG3_FLOW_CTRL_RX)
Matt Carlson95e28692008-05-25 23:44:14 -07001246 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1247 else
1248 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1249
Matt Carlsonf51f3562008-05-25 23:45:08 -07001250 if (old_rx_mode != tp->rx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001251 tw32_f(MAC_RX_MODE, tp->rx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001252
Matt Carlsonf51f3562008-05-25 23:45:08 -07001253 if (flowctrl & TG3_FLOW_CTRL_TX)
Matt Carlson95e28692008-05-25 23:44:14 -07001254 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1255 else
1256 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1257
Matt Carlsonf51f3562008-05-25 23:45:08 -07001258 if (old_tx_mode != tp->tx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001259 tw32_f(MAC_TX_MODE, tp->tx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001260}
1261
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001262static void tg3_adjust_link(struct net_device *dev)
1263{
1264 u8 oldflowctrl, linkmesg = 0;
1265 u32 mac_mode, lcl_adv, rmt_adv;
1266 struct tg3 *tp = netdev_priv(dev);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001267 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001268
1269 spin_lock(&tp->lock);
1270
1271 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1272 MAC_MODE_HALF_DUPLEX);
1273
1274 oldflowctrl = tp->link_config.active_flowctrl;
1275
1276 if (phydev->link) {
1277 lcl_adv = 0;
1278 rmt_adv = 0;
1279
1280 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1281 mac_mode |= MAC_MODE_PORT_MODE_MII;
1282 else
1283 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1284
1285 if (phydev->duplex == DUPLEX_HALF)
1286 mac_mode |= MAC_MODE_HALF_DUPLEX;
1287 else {
1288 lcl_adv = tg3_advert_flowctrl_1000T(
1289 tp->link_config.flowctrl);
1290
1291 if (phydev->pause)
1292 rmt_adv = LPA_PAUSE_CAP;
1293 if (phydev->asym_pause)
1294 rmt_adv |= LPA_PAUSE_ASYM;
1295 }
1296
1297 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1298 } else
1299 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1300
1301 if (mac_mode != tp->mac_mode) {
1302 tp->mac_mode = mac_mode;
1303 tw32_f(MAC_MODE, tp->mac_mode);
1304 udelay(40);
1305 }
1306
1307 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1308 tw32(MAC_TX_LENGTHS,
1309 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1310 (6 << TX_LENGTHS_IPG_SHIFT) |
1311 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1312 else
1313 tw32(MAC_TX_LENGTHS,
1314 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1315 (6 << TX_LENGTHS_IPG_SHIFT) |
1316 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1317
1318 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1319 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1320 phydev->speed != tp->link_config.active_speed ||
1321 phydev->duplex != tp->link_config.active_duplex ||
1322 oldflowctrl != tp->link_config.active_flowctrl)
1323 linkmesg = 1;
1324
1325 tp->link_config.active_speed = phydev->speed;
1326 tp->link_config.active_duplex = phydev->duplex;
1327
1328 spin_unlock(&tp->lock);
1329
1330 if (linkmesg)
1331 tg3_link_report(tp);
1332}
1333
1334static int tg3_phy_init(struct tg3 *tp)
1335{
1336 struct phy_device *phydev;
1337
1338 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1339 return 0;
1340
1341 /* Bring the PHY back to a known state. */
1342 tg3_bmcr_reset(tp);
1343
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001344 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001345
1346 /* Attach the MAC to the PHY. */
Matt Carlsona9daf362008-05-25 23:49:44 -07001347 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1348 phydev->dev_flags, phydev->interface);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001349 if (IS_ERR(phydev)) {
1350 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1351 return PTR_ERR(phydev);
1352 }
1353
1354 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1355
1356 /* Mask with MAC supported features. */
1357 phydev->supported &= (PHY_GBIT_FEATURES |
1358 SUPPORTED_Pause |
1359 SUPPORTED_Asym_Pause);
1360
1361 phydev->advertising = phydev->supported;
1362
1363 printk(KERN_INFO
1364 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1365 tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1366
1367 return 0;
1368}
1369
1370static void tg3_phy_start(struct tg3 *tp)
1371{
1372 struct phy_device *phydev;
1373
1374 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1375 return;
1376
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001377 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001378
1379 if (tp->link_config.phy_is_low_power) {
1380 tp->link_config.phy_is_low_power = 0;
1381 phydev->speed = tp->link_config.orig_speed;
1382 phydev->duplex = tp->link_config.orig_duplex;
1383 phydev->autoneg = tp->link_config.orig_autoneg;
1384 phydev->advertising = tp->link_config.orig_advertising;
1385 }
1386
1387 phy_start(phydev);
1388
1389 phy_start_aneg(phydev);
1390}
1391
1392static void tg3_phy_stop(struct tg3 *tp)
1393{
1394 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1395 return;
1396
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001397 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001398}
1399
1400static void tg3_phy_fini(struct tg3 *tp)
1401{
1402 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001403 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001404 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1405 }
1406}
1407
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001408static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1409{
1410 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1411 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1412}
1413
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001414static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1415{
1416 u32 phy;
1417
1418 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1419 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1420 return;
1421
1422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1423 u32 ephy;
1424
1425 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1426 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1427 ephy | MII_TG3_EPHY_SHADOW_EN);
1428 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1429 if (enable)
1430 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1431 else
1432 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1433 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1434 }
1435 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1436 }
1437 } else {
1438 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1439 MII_TG3_AUXCTL_SHDWSEL_MISC;
1440 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1441 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1442 if (enable)
1443 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1444 else
1445 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1446 phy |= MII_TG3_AUXCTL_MISC_WREN;
1447 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1448 }
1449 }
1450}
1451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452static void tg3_phy_set_wirespeed(struct tg3 *tp)
1453{
1454 u32 val;
1455
1456 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1457 return;
1458
1459 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1460 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1461 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1462 (val | (1 << 15) | (1 << 4)));
1463}
1464
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001465static void tg3_phy_apply_otp(struct tg3 *tp)
1466{
1467 u32 otp, phy;
1468
1469 if (!tp->phy_otp)
1470 return;
1471
1472 otp = tp->phy_otp;
1473
1474 /* Enable SM_DSP clock and tx 6dB coding. */
1475 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1476 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1477 MII_TG3_AUXCTL_ACTL_TX_6DB;
1478 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1479
1480 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1481 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1482 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1483
1484 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1485 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1486 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1487
1488 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1489 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1490 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1491
1492 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1493 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1494
1495 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1496 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1497
1498 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1499 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1500 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1501
1502 /* Turn off SM_DSP clock. */
1503 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1504 MII_TG3_AUXCTL_ACTL_TX_6DB;
1505 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1506}
1507
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508static int tg3_wait_macro_done(struct tg3 *tp)
1509{
1510 int limit = 100;
1511
1512 while (limit--) {
1513 u32 tmp32;
1514
1515 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1516 if ((tmp32 & 0x1000) == 0)
1517 break;
1518 }
1519 }
1520	if (limit < 0)
1521 return -EBUSY;
1522
1523 return 0;
1524}
1525
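/* Write a known test pattern into each of the four DSP channels and read
 * it back.  Any mismatch or handshake timeout sets *resetp so the caller
 * can retry after another PHY reset.
 */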
1526static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1527{
1528 static const u32 test_pat[4][6] = {
1529 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1530 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1531 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1532 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1533 };
1534 int chan;
1535
1536 for (chan = 0; chan < 4; chan++) {
1537 int i;
1538
1539 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1540 (chan * 0x2000) | 0x0200);
1541 tg3_writephy(tp, 0x16, 0x0002);
1542
1543 for (i = 0; i < 6; i++)
1544 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1545 test_pat[chan][i]);
1546
1547 tg3_writephy(tp, 0x16, 0x0202);
1548 if (tg3_wait_macro_done(tp)) {
1549 *resetp = 1;
1550 return -EBUSY;
1551 }
1552
1553 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1554 (chan * 0x2000) | 0x0200);
1555 tg3_writephy(tp, 0x16, 0x0082);
1556 if (tg3_wait_macro_done(tp)) {
1557 *resetp = 1;
1558 return -EBUSY;
1559 }
1560
1561 tg3_writephy(tp, 0x16, 0x0802);
1562 if (tg3_wait_macro_done(tp)) {
1563 *resetp = 1;
1564 return -EBUSY;
1565 }
1566
1567 for (i = 0; i < 6; i += 2) {
1568 u32 low, high;
1569
1570 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1571 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1572 tg3_wait_macro_done(tp)) {
1573 *resetp = 1;
1574 return -EBUSY;
1575 }
1576 low &= 0x7fff;
1577 high &= 0x000f;
1578 if (low != test_pat[chan][i] ||
1579 high != test_pat[chan][i+1]) {
1580 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1581 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1582 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1583
1584 return -EBUSY;
1585 }
1586 }
1587 }
1588
1589 return 0;
1590}
1591
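/* Clear the test pattern out of all four DSP channels. */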
1592static int tg3_phy_reset_chanpat(struct tg3 *tp)
1593{
1594 int chan;
1595
1596 for (chan = 0; chan < 4; chan++) {
1597 int i;
1598
1599 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1600 (chan * 0x2000) | 0x0200);
1601 tg3_writephy(tp, 0x16, 0x0002);
1602 for (i = 0; i < 6; i++)
1603			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1604 tg3_writephy(tp, 0x16, 0x0202);
1605 if (tg3_wait_macro_done(tp))
1606 return -EBUSY;
1607 }
1608
1609 return 0;
1610}
1611
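/* Extended PHY reset sequence for 5703/5704/5705 class chips: repeat the
 * BMCR reset until the DSP test pattern verifies, then restore the
 * original register settings.
 */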
1612static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1613{
1614 u32 reg32, phy9_orig;
1615 int retries, do_phy_reset, err;
1616
1617 retries = 10;
1618 do_phy_reset = 1;
1619 do {
1620 if (do_phy_reset) {
1621 err = tg3_bmcr_reset(tp);
1622 if (err)
1623 return err;
1624 do_phy_reset = 0;
1625 }
1626
1627 /* Disable transmitter and interrupt. */
1628 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1629 continue;
1630
1631 reg32 |= 0x3000;
1632 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1633
1634		/* Set full-duplex, 1000 Mbps. */
1635 tg3_writephy(tp, MII_BMCR,
1636 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1637
1638 /* Set to master mode. */
1639 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1640 continue;
1641
1642 tg3_writephy(tp, MII_TG3_CTRL,
1643 (MII_TG3_CTRL_AS_MASTER |
1644 MII_TG3_CTRL_ENABLE_AS_MASTER));
1645
1646 /* Enable SM_DSP_CLOCK and 6dB. */
1647 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1648
1649 /* Block the PHY control access. */
1650 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1651 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1652
1653 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1654 if (!err)
1655 break;
1656 } while (--retries);
1657
1658 err = tg3_phy_reset_chanpat(tp);
1659 if (err)
1660 return err;
1661
1662 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1663 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1664
1665 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1666 tg3_writephy(tp, 0x16, 0x0000);
1667
1668 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1670 /* Set Extended packet length bit for jumbo frames */
1671 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1672	} else {
1674 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1675 }
1676
1677 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1678
1679 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1680 reg32 &= ~0x3000;
1681 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1682 } else if (!err)
1683 err = -EBUSY;
1684
1685 return err;
1686}
1687
1688/* This will reset the tigon3 PHY if there is no valid
1689 * link unless the FORCE argument is non-zero.
1690 */
1691static int tg3_phy_reset(struct tg3 *tp)
1692{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001693 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 u32 phy_status;
1695 int err;
1696
Michael Chan60189dd2006-12-17 17:08:07 -08001697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1698 u32 val;
1699
1700 val = tr32(GRC_MISC_CFG);
1701 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1702 udelay(40);
1703 }
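	/* BMSR is latched; read it twice to obtain the current link state. */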
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1705 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1706 if (err != 0)
1707 return -EBUSY;
1708
Michael Chanc8e1e822006-04-29 18:55:17 -07001709 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1710 netif_carrier_off(tp->dev);
1711 tg3_link_report(tp);
1712 }
1713
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1715 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1716 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1717 err = tg3_phy_reset_5703_4_5(tp);
1718 if (err)
1719 return err;
1720 goto out;
1721 }
1722
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001723 cpmuctrl = 0;
1724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1725 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1726 cpmuctrl = tr32(TG3_CPMU_CTRL);
1727 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1728 tw32(TG3_CPMU_CTRL,
1729 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1730 }
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 err = tg3_bmcr_reset(tp);
1733 if (err)
1734 return err;
1735
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001736 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1737 u32 phy;
1738
1739 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1740 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1741
1742 tw32(TG3_CPMU_CTRL, cpmuctrl);
1743 }
1744
Matt Carlsonb5af7122007-11-12 21:22:02 -08001745 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001746 u32 val;
1747
1748 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1749 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1750 CPMU_LSPD_1000MB_MACCLK_12_5) {
1751 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1752 udelay(40);
1753 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1754 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001755
1756 /* Disable GPHY autopowerdown. */
1757 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1758 MII_TG3_MISC_SHDW_WREN |
1759 MII_TG3_MISC_SHDW_APD_SEL |
1760 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001761 }
1762
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001763 tg3_phy_apply_otp(tp);
1764
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765out:
1766 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1767 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1768 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1769 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1770 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1771 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1772 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1773 }
1774 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1775 tg3_writephy(tp, 0x1c, 0x8d68);
1776 tg3_writephy(tp, 0x1c, 0x8d68);
1777 }
1778 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1779 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1780 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1781 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1782 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1783 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1784 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1785 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1786 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
Michael Chanc424cb22006-04-29 18:56:34 -07001788	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1789 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1790 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001791 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1792 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1793 tg3_writephy(tp, MII_TG3_TEST1,
1794 MII_TG3_TEST1_TRIM_EN | 0x4);
1795 } else
1796 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001797 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1798 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799	/* Set Extended packet length bit (bit 14) on all chips that
1800	 * support jumbo frames. */
1801 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1802 /* Cannot do read-modify-write on 5401 */
1803 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001804 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 u32 phy_reg;
1806
1807 /* Set bit 14 with read-modify-write to preserve other bits */
1808 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1809 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1810 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1811 }
1812
1813	/* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
1814	 * jumbo frame transmission.
1815 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001816 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 u32 phy_reg;
1818
1819 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1820 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1821 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1822 }
1823
Michael Chan715116a2006-09-27 16:09:25 -07001824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001825 /* adjust output voltage */
1826 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001827 }
1828
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001829 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 tg3_phy_set_wirespeed(tp);
1831 return 0;
1832}
1833
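/* Drive the GPIOs that switch the NIC between main and auxiliary power.
 * On dual-port devices (5704/5714) the peer function's WOL/ASF
 * requirements are taken into account as well.
 */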
1834static void tg3_frob_aux_power(struct tg3 *tp)
1835{
1836 struct tg3 *tp_peer = tp;
1837
Michael Chan9d26e212006-12-07 00:21:14 -08001838 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 return;
1840
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001841 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1842 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1843 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001845 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001846 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001847 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001848 tp_peer = tp;
1849 else
1850 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001851 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852
1853 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001854 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1855 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1856 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001859 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1860 (GRC_LCLCTRL_GPIO_OE0 |
1861 GRC_LCLCTRL_GPIO_OE1 |
1862 GRC_LCLCTRL_GPIO_OE2 |
1863 GRC_LCLCTRL_GPIO_OUTPUT0 |
1864 GRC_LCLCTRL_GPIO_OUTPUT1),
1865 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001866 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1867 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1868 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1869 GRC_LCLCTRL_GPIO_OE1 |
1870 GRC_LCLCTRL_GPIO_OE2 |
1871 GRC_LCLCTRL_GPIO_OUTPUT0 |
1872 GRC_LCLCTRL_GPIO_OUTPUT1 |
1873 tp->grc_local_ctrl;
1874 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1875
1876 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1877 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1878
1879 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 } else {
1882 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001883 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
1885 if (tp_peer != tp &&
1886 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1887 return;
1888
Michael Chandc56b7d2005-12-19 16:26:28 -08001889 /* Workaround to prevent overdrawing Amps. */
1890 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1891 ASIC_REV_5714) {
1892 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001893 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1894 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001895 }
1896
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 /* On 5753 and variants, GPIO2 cannot be used. */
1898 no_gpio2 = tp->nic_sram_data_cfg &
1899 NIC_SRAM_DATA_CFG_NO_GPIO2;
1900
Michael Chandc56b7d2005-12-19 16:26:28 -08001901 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 GRC_LCLCTRL_GPIO_OE1 |
1903 GRC_LCLCTRL_GPIO_OE2 |
1904 GRC_LCLCTRL_GPIO_OUTPUT1 |
1905 GRC_LCLCTRL_GPIO_OUTPUT2;
1906 if (no_gpio2) {
1907 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1908 GRC_LCLCTRL_GPIO_OUTPUT2);
1909 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001910 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1911 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1914
Michael Chanb401e9e2005-12-19 16:27:04 -08001915 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1916 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917
1918 if (!no_gpio2) {
1919 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001920 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1921 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 }
1923 }
1924 } else {
1925 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1926 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1927 if (tp_peer != tp &&
1928 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1929 return;
1930
Michael Chanb401e9e2005-12-19 16:27:04 -08001931 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1932 (GRC_LCLCTRL_GPIO_OE1 |
1933 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
Michael Chanb401e9e2005-12-19 16:27:04 -08001935 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1936 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
Michael Chanb401e9e2005-12-19 16:27:04 -08001938 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1939 (GRC_LCLCTRL_GPIO_OE1 |
1940 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 }
1942 }
1943}
1944
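/* Decide whether MAC_MODE_LINK_POLARITY should be set on 5700-class
 * devices for the given link speed, based on the LED mode and PHY type.
 */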
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07001945static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1946{
1947 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1948 return 1;
1949 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1950 if (speed != SPEED_10)
1951 return 1;
1952 } else if (speed == SPEED_10)
1953 return 1;
1954
1955 return 0;
1956}
1957
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958static int tg3_setup_phy(struct tg3 *, int);
1959
1960#define RESET_KIND_SHUTDOWN 0
1961#define RESET_KIND_INIT 1
1962#define RESET_KIND_SUSPEND 2
1963
1964static void tg3_write_sig_post_reset(struct tg3 *, int);
1965static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08001966static int tg3_nvram_lock(struct tg3 *);
1967static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
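/* Power down the PHY (or quiesce the SerDes block) ahead of a low-power
 * transition.  Some chip revisions must keep the PHY powered, or need
 * extra clock programming, to work around hardware bugs.
 */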
Michael Chan15c3b692006-03-22 01:06:52 -08001969static void tg3_power_down_phy(struct tg3 *tp)
1970{
Matt Carlsonce057f02007-11-12 21:08:03 -08001971 u32 val;
1972
Michael Chan51297242007-02-13 12:17:57 -08001973 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1974 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1975 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1976 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1977
1978 sg_dig_ctrl |=
1979 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1980 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1981 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1982 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001983 return;
Michael Chan51297242007-02-13 12:17:57 -08001984 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001985
Michael Chan60189dd2006-12-17 17:08:07 -08001986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08001987 tg3_bmcr_reset(tp);
1988 val = tr32(GRC_MISC_CFG);
1989 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1990 udelay(40);
1991 return;
Matt Carlsondd477002008-05-25 23:45:58 -07001992 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan715116a2006-09-27 16:09:25 -07001993 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1994 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1995 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1996 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001997
Michael Chan15c3b692006-03-22 01:06:52 -08001998 /* The PHY should not be powered down on some chips because
1999 * of bugs.
2000 */
2001 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2003 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2004 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2005 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002006
Matt Carlsonb5af7122007-11-12 21:22:02 -08002007 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002008 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2009 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2010 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2011 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2012 }
2013
Michael Chan15c3b692006-03-22 01:06:52 -08002014 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2015}
2016
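/* Program the station address into every MAC address slot (plus the
 * twelve extended slots on 5703/5704) and derive the TX backoff seed.
 */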
Matt Carlson3f007892008-11-03 16:51:36 -08002017/* tp->lock is held. */
2018static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2019{
2020 u32 addr_high, addr_low;
2021 int i;
2022
2023 addr_high = ((tp->dev->dev_addr[0] << 8) |
2024 tp->dev->dev_addr[1]);
2025 addr_low = ((tp->dev->dev_addr[2] << 24) |
2026 (tp->dev->dev_addr[3] << 16) |
2027 (tp->dev->dev_addr[4] << 8) |
2028 (tp->dev->dev_addr[5] << 0));
2029 for (i = 0; i < 4; i++) {
2030 if (i == 1 && skip_mac_1)
2031 continue;
2032 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2033 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2034 }
2035
2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2038 for (i = 0; i < 12; i++) {
2039 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2040 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2041 }
2042 }
2043
2044 addr_high = (tp->dev->dev_addr[0] +
2045 tp->dev->dev_addr[1] +
2046 tp->dev->dev_addr[2] +
2047 tp->dev->dev_addr[3] +
2048 tp->dev->dev_addr[4] +
2049 tp->dev->dev_addr[5]) &
2050 TX_BACKOFF_SEED_MASK;
2051 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2052}
2053
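/* Move the device into the requested PCI power state, configuring
 * Wake-on-LAN, PHY power-down and clock gating along the way.
 */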
Michael Chanbc1c7562006-03-20 17:48:03 -08002054static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055{
2056 u32 misc_host_ctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058 /* Make sure register accesses (indirect or otherwise)
2059 * will function correctly.
2060 */
2061 pci_write_config_dword(tp->pdev,
2062 TG3PCI_MISC_HOST_CTRL,
2063 tp->misc_host_ctrl);
2064
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002066 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002067 pci_enable_wake(tp->pdev, state, false);
2068 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002069
Michael Chan9d26e212006-12-07 00:21:14 -08002070 /* Switch out of Vaux if it is a NIC */
2071 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002072 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
2074 return 0;
2075
Michael Chanbc1c7562006-03-20 17:48:03 -08002076 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002077 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002078 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 break;
2080
2081 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002082 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2083 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002085 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2087 tw32(TG3PCI_MISC_HOST_CTRL,
2088 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2089
Matt Carlsondd477002008-05-25 23:45:58 -07002090 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002091 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2092 !tp->link_config.phy_is_low_power) {
2093 struct phy_device *phydev;
2094 u32 advertising;
2095
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002096 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002097
2098 tp->link_config.phy_is_low_power = 1;
2099
2100 tp->link_config.orig_speed = phydev->speed;
2101 tp->link_config.orig_duplex = phydev->duplex;
2102 tp->link_config.orig_autoneg = phydev->autoneg;
2103 tp->link_config.orig_advertising = phydev->advertising;
2104
2105 advertising = ADVERTISED_TP |
2106 ADVERTISED_Pause |
2107 ADVERTISED_Autoneg |
2108 ADVERTISED_10baseT_Half;
2109
2110 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2111 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2112 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2113 advertising |=
2114 ADVERTISED_100baseT_Half |
2115 ADVERTISED_100baseT_Full |
2116 ADVERTISED_10baseT_Full;
2117 else
2118 advertising |= ADVERTISED_10baseT_Full;
2119 }
2120
2121 phydev->advertising = advertising;
2122
2123 phy_start_aneg(phydev);
2124 }
Matt Carlsondd477002008-05-25 23:45:58 -07002125 } else {
2126 if (tp->link_config.phy_is_low_power == 0) {
2127 tp->link_config.phy_is_low_power = 1;
2128 tp->link_config.orig_speed = tp->link_config.speed;
2129 tp->link_config.orig_duplex = tp->link_config.duplex;
2130 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2131 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Matt Carlsondd477002008-05-25 23:45:58 -07002133 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2134 tp->link_config.speed = SPEED_10;
2135 tp->link_config.duplex = DUPLEX_HALF;
2136 tp->link_config.autoneg = AUTONEG_ENABLE;
2137 tg3_setup_phy(tp, 0);
2138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 }
2140
Matt Carlson3f007892008-11-03 16:51:36 -08002141 __tg3_set_mac_addr(tp, 0);
2142
Michael Chanb5d37722006-09-27 16:06:21 -07002143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2144 u32 val;
2145
2146 val = tr32(GRC_VCPU_EXT_CTRL);
2147 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2148 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002149 int i;
2150 u32 val;
2151
2152 for (i = 0; i < 200; i++) {
2153 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2154 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2155 break;
2156 msleep(1);
2157 }
2158 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002159 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2160 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2161 WOL_DRV_STATE_SHUTDOWN |
2162 WOL_DRV_WOL |
2163 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002164
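	/* When WOL is enabled, program the MAC port mode and leave the
	 * receiver running so magic packets can wake the chip while asleep.
	 */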
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2166 u32 mac_mode;
2167
2168 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlsondd477002008-05-25 23:45:58 -07002169 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2170 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2171 udelay(40);
2172 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
Michael Chan3f7045c2006-09-27 16:02:29 -07002174 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2175 mac_mode = MAC_MODE_PORT_MODE_GMII;
2176 else
2177 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002179 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2180 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2181 ASIC_REV_5700) {
2182 u32 speed = (tp->tg3_flags &
2183 TG3_FLAG_WOL_SPEED_100MB) ?
2184 SPEED_100 : SPEED_10;
2185 if (tg3_5700_link_polarity(tp, speed))
2186 mac_mode |= MAC_MODE_LINK_POLARITY;
2187 else
2188 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2189 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 } else {
2191 mac_mode = MAC_MODE_PORT_MODE_TBI;
2192 }
2193
John W. Linvillecbf46852005-04-21 17:01:29 -07002194 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 tw32(MAC_LED_CTRL, tp->led_ctrl);
2196
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002197 if (pci_pme_capable(tp->pdev, state) &&
Matt Carlsonb2aee152008-11-03 16:51:11 -08002198 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
Matt Carlsonb2aee152008-11-03 16:51:11 -08002200 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2201 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2202 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2203 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2204 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2205 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
Matt Carlson3bda1252008-08-15 14:08:22 -07002207 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2208 mac_mode |= tp->mac_mode &
2209 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2210 if (mac_mode & MAC_MODE_APE_TX_EN)
2211 mac_mode |= MAC_MODE_TDE_ENABLE;
2212 }
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 tw32_f(MAC_MODE, mac_mode);
2215 udelay(100);
2216
2217 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2218 udelay(10);
2219 }
2220
2221 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2222 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2224 u32 base_val;
2225
2226 base_val = tp->pci_clock_ctrl;
2227 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2228 CLOCK_CTRL_TXCLK_DISABLE);
2229
Michael Chanb401e9e2005-12-19 16:27:04 -08002230 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2231 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002232 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002233 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002234 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002235 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002236 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2238 u32 newbits1, newbits2;
2239
2240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2242 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2243 CLOCK_CTRL_TXCLK_DISABLE |
2244 CLOCK_CTRL_ALTCLK);
2245 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2246 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2247 newbits1 = CLOCK_CTRL_625_CORE;
2248 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2249 } else {
2250 newbits1 = CLOCK_CTRL_ALTCLK;
2251 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2252 }
2253
Michael Chanb401e9e2005-12-19 16:27:04 -08002254 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2255 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256
Michael Chanb401e9e2005-12-19 16:27:04 -08002257 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2258 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259
2260 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2261 u32 newbits3;
2262
2263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2265 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2266 CLOCK_CTRL_TXCLK_DISABLE |
2267 CLOCK_CTRL_44MHZ_CORE);
2268 } else {
2269 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2270 }
2271
Michael Chanb401e9e2005-12-19 16:27:04 -08002272 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2273 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 }
2275 }
2276
Michael Chan6921d202005-12-13 21:15:53 -08002277 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002278 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2279 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Michael Chan3f7045c2006-09-27 16:02:29 -07002280 tg3_power_down_phy(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 tg3_frob_aux_power(tp);
2283
2284 /* Workaround for unstable PLL clock */
2285 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2286 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2287 u32 val = tr32(0x7d00);
2288
2289 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2290 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002291 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002292 int err;
2293
2294 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002296 if (!err)
2297 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 }
2300
Michael Chanbbadf502006-04-06 21:46:34 -07002301 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2302
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002303 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2304 pci_enable_wake(tp->pdev, state, true);
2305
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002307 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 return 0;
2310}
2311
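/* Decode the PHY auxiliary status register into speed and duplex.
 * Unrecognized values fall back to SPEED_INVALID/DUPLEX_INVALID,
 * with a 10/100 decode on the 5906.
 */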
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2313{
2314 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2315 case MII_TG3_AUX_STAT_10HALF:
2316 *speed = SPEED_10;
2317 *duplex = DUPLEX_HALF;
2318 break;
2319
2320 case MII_TG3_AUX_STAT_10FULL:
2321 *speed = SPEED_10;
2322 *duplex = DUPLEX_FULL;
2323 break;
2324
2325 case MII_TG3_AUX_STAT_100HALF:
2326 *speed = SPEED_100;
2327 *duplex = DUPLEX_HALF;
2328 break;
2329
2330 case MII_TG3_AUX_STAT_100FULL:
2331 *speed = SPEED_100;
2332 *duplex = DUPLEX_FULL;
2333 break;
2334
2335 case MII_TG3_AUX_STAT_1000HALF:
2336 *speed = SPEED_1000;
2337 *duplex = DUPLEX_HALF;
2338 break;
2339
2340 case MII_TG3_AUX_STAT_1000FULL:
2341 *speed = SPEED_1000;
2342 *duplex = DUPLEX_FULL;
2343 break;
2344
2345 default:
Michael Chan715116a2006-09-27 16:09:25 -07002346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2347 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2348 SPEED_10;
2349 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2350 DUPLEX_HALF;
2351 break;
2352 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 *speed = SPEED_INVALID;
2354 *duplex = DUPLEX_INVALID;
2355 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357}
2358
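/* Program the advertisement registers from link_config and (re)start
 * autonegotiation, or force the requested speed/duplex when autoneg is
 * disabled.  In low-power mode only 10/100 modes are advertised.
 */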
2359static void tg3_phy_copper_begin(struct tg3 *tp)
2360{
2361 u32 new_adv;
2362 int i;
2363
2364 if (tp->link_config.phy_is_low_power) {
2365 /* Entering low power mode. Disable gigabit and
2366 * 100baseT advertisements.
2367 */
2368 tg3_writephy(tp, MII_TG3_CTRL, 0);
2369
2370 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2371 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2372 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2373 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2374
2375 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2376 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2378 tp->link_config.advertising &=
2379 ~(ADVERTISED_1000baseT_Half |
2380 ADVERTISED_1000baseT_Full);
2381
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002382 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2384 new_adv |= ADVERTISE_10HALF;
2385 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2386 new_adv |= ADVERTISE_10FULL;
2387 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2388 new_adv |= ADVERTISE_100HALF;
2389 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2390 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002391
2392 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2393
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2395
2396 if (tp->link_config.advertising &
2397 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2398 new_adv = 0;
2399 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2400 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2401 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2402 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2403 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2404 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2405 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2406 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2407 MII_TG3_CTRL_ENABLE_AS_MASTER);
2408 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2409 } else {
2410 tg3_writephy(tp, MII_TG3_CTRL, 0);
2411 }
2412 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002413 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2414 new_adv |= ADVERTISE_CSMA;
2415
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 /* Asking for a specific link mode. */
2417 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2419
2420 if (tp->link_config.duplex == DUPLEX_FULL)
2421 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2422 else
2423 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2424 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2425 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2426 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2427 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 if (tp->link_config.speed == SPEED_100) {
2430 if (tp->link_config.duplex == DUPLEX_FULL)
2431 new_adv |= ADVERTISE_100FULL;
2432 else
2433 new_adv |= ADVERTISE_100HALF;
2434 } else {
2435 if (tp->link_config.duplex == DUPLEX_FULL)
2436 new_adv |= ADVERTISE_10FULL;
2437 else
2438 new_adv |= ADVERTISE_10HALF;
2439 }
2440 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002441
2442 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002444
2445 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 }
2447
2448 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2449 tp->link_config.speed != SPEED_INVALID) {
2450 u32 bmcr, orig_bmcr;
2451
2452 tp->link_config.active_speed = tp->link_config.speed;
2453 tp->link_config.active_duplex = tp->link_config.duplex;
2454
2455 bmcr = 0;
2456 switch (tp->link_config.speed) {
2457 default:
2458 case SPEED_10:
2459 break;
2460
2461 case SPEED_100:
2462 bmcr |= BMCR_SPEED100;
2463 break;
2464
2465 case SPEED_1000:
2466 bmcr |= TG3_BMCR_SPEED1000;
2467 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002468 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
2470 if (tp->link_config.duplex == DUPLEX_FULL)
2471 bmcr |= BMCR_FULLDPLX;
2472
2473 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2474 (bmcr != orig_bmcr)) {
2475 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2476 for (i = 0; i < 1500; i++) {
2477 u32 tmp;
2478
2479 udelay(10);
2480 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2481 tg3_readphy(tp, MII_BMSR, &tmp))
2482 continue;
2483 if (!(tmp & BMSR_LSTATUS)) {
2484 udelay(40);
2485 break;
2486 }
2487 }
2488 tg3_writephy(tp, MII_BMCR, bmcr);
2489 udelay(40);
2490 }
2491 } else {
2492 tg3_writephy(tp, MII_BMCR,
2493 BMCR_ANENABLE | BMCR_ANRESTART);
2494 }
2495}
2496
2497static int tg3_init_5401phy_dsp(struct tg3 *tp)
2498{
2499 int err;
2500
2501 /* Turn off tap power management. */
2502 /* Set Extended packet length bit */
2503 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2504
2505 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2506 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2507
2508 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2509 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2510
2511 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2512 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2513
2514 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2515 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2516
2517 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2518 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2519
2520 udelay(40);
2521
2522 return err;
2523}
2524
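/* Return 1 only if every mode in 'mask' is already present in the PHY
 * advertisement registers (MII_ADVERTISE and, for gigabit, MII_TG3_CTRL).
 */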
Michael Chan3600d912006-12-07 00:21:48 -08002525static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526{
Michael Chan3600d912006-12-07 00:21:48 -08002527 u32 adv_reg, all_mask = 0;
2528
2529 if (mask & ADVERTISED_10baseT_Half)
2530 all_mask |= ADVERTISE_10HALF;
2531 if (mask & ADVERTISED_10baseT_Full)
2532 all_mask |= ADVERTISE_10FULL;
2533 if (mask & ADVERTISED_100baseT_Half)
2534 all_mask |= ADVERTISE_100HALF;
2535 if (mask & ADVERTISED_100baseT_Full)
2536 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
2538 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2539 return 0;
2540
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 if ((adv_reg & all_mask) != all_mask)
2542 return 0;
2543 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2544 u32 tg3_ctrl;
2545
Michael Chan3600d912006-12-07 00:21:48 -08002546 all_mask = 0;
2547 if (mask & ADVERTISED_1000baseT_Half)
2548 all_mask |= ADVERTISE_1000HALF;
2549 if (mask & ADVERTISED_1000baseT_Full)
2550 all_mask |= ADVERTISE_1000FULL;
2551
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2553 return 0;
2554
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 if ((tg3_ctrl & all_mask) != all_mask)
2556 return 0;
2557 }
2558 return 1;
2559}
2560
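/* Check that the advertised pause bits match the requested flow control.
 * On a full-duplex link a mismatch fails the check; otherwise the
 * advertisement is quietly corrected for the next negotiation.
 */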
Matt Carlsonef167e22007-12-20 20:10:01 -08002561static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2562{
2563 u32 curadv, reqadv;
2564
2565 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2566 return 1;
2567
2568 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2569 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2570
2571 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2572 if (curadv != reqadv)
2573 return 0;
2574
2575 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2576 tg3_readphy(tp, MII_LPA, rmtadv);
2577 } else {
2578 /* Reprogram the advertisement register, even if it
2579 * does not affect the current link. If the link
2580 * gets renegotiated in the future, we can save an
2581 * additional renegotiation cycle by advertising
2582 * it correctly in the first place.
2583 */
2584 if (curadv != reqadv) {
2585 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2586 ADVERTISE_PAUSE_ASYM);
2587 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2588 }
2589 }
2590
2591 return 1;
2592}
2593
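/* Main link bring-up path for copper PHYs: clear stale MAC/PHY status,
 * apply per-chip workarounds, validate the autonegotiation result and
 * program the MAC for the resulting speed, duplex and flow control.
 */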
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2595{
2596 int current_link_up;
2597 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002598 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 u16 current_speed;
2600 u8 current_duplex;
2601 int i, err;
2602
2603 tw32(MAC_EVENT, 0);
2604
2605 tw32_f(MAC_STATUS,
2606 (MAC_STATUS_SYNC_CHANGED |
2607 MAC_STATUS_CFG_CHANGED |
2608 MAC_STATUS_MI_COMPLETION |
2609 MAC_STATUS_LNKSTATE_CHANGED));
2610 udelay(40);
2611
Matt Carlson8ef21422008-05-02 16:47:53 -07002612 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2613 tw32_f(MAC_MI_MODE,
2614 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2615 udelay(80);
2616 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
2618 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2619
2620 /* Some third-party PHYs need to be reset on link going
2621 * down.
2622 */
2623 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2626 netif_carrier_ok(tp->dev)) {
2627 tg3_readphy(tp, MII_BMSR, &bmsr);
2628 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2629 !(bmsr & BMSR_LSTATUS))
2630 force_reset = 1;
2631 }
2632 if (force_reset)
2633 tg3_phy_reset(tp);
2634
2635 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2636 tg3_readphy(tp, MII_BMSR, &bmsr);
2637 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2638 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2639 bmsr = 0;
2640
2641 if (!(bmsr & BMSR_LSTATUS)) {
2642 err = tg3_init_5401phy_dsp(tp);
2643 if (err)
2644 return err;
2645
2646 tg3_readphy(tp, MII_BMSR, &bmsr);
2647 for (i = 0; i < 1000; i++) {
2648 udelay(10);
2649 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2650 (bmsr & BMSR_LSTATUS)) {
2651 udelay(40);
2652 break;
2653 }
2654 }
2655
2656 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2657 !(bmsr & BMSR_LSTATUS) &&
2658 tp->link_config.active_speed == SPEED_1000) {
2659 err = tg3_phy_reset(tp);
2660 if (!err)
2661 err = tg3_init_5401phy_dsp(tp);
2662 if (err)
2663 return err;
2664 }
2665 }
2666 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2667 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2668 /* 5701 {A0,B0} CRC bug workaround */
2669 tg3_writephy(tp, 0x15, 0x0a75);
2670 tg3_writephy(tp, 0x1c, 0x8c68);
2671 tg3_writephy(tp, 0x1c, 0x8d68);
2672 tg3_writephy(tp, 0x1c, 0x8c68);
2673 }
2674
2675 /* Clear pending interrupts... */
2676 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2677 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2678
2679 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2680 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002681 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2683
2684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2686 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2687 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2688 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2689 else
2690 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2691 }
2692
2693 current_link_up = 0;
2694 current_speed = SPEED_INVALID;
2695 current_duplex = DUPLEX_INVALID;
2696
2697 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2698 u32 val;
2699
2700 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2701 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2702 if (!(val & (1 << 10))) {
2703 val |= (1 << 10);
2704 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2705 goto relink;
2706 }
2707 }
2708
2709 bmsr = 0;
2710 for (i = 0; i < 100; i++) {
2711 tg3_readphy(tp, MII_BMSR, &bmsr);
2712 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2713 (bmsr & BMSR_LSTATUS))
2714 break;
2715 udelay(40);
2716 }
2717
2718 if (bmsr & BMSR_LSTATUS) {
2719 u32 aux_stat, bmcr;
2720
2721 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2722 for (i = 0; i < 2000; i++) {
2723 udelay(10);
2724 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2725 aux_stat)
2726 break;
2727 }
2728
2729 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2730 &current_speed,
2731 &current_duplex);
2732
2733 bmcr = 0;
2734 for (i = 0; i < 200; i++) {
2735 tg3_readphy(tp, MII_BMCR, &bmcr);
2736 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2737 continue;
2738 if (bmcr && bmcr != 0x7fff)
2739 break;
2740 udelay(10);
2741 }
2742
Matt Carlsonef167e22007-12-20 20:10:01 -08002743 lcl_adv = 0;
2744 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745
Matt Carlsonef167e22007-12-20 20:10:01 -08002746 tp->link_config.active_speed = current_speed;
2747 tp->link_config.active_duplex = current_duplex;
2748
2749 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2750 if ((bmcr & BMCR_ANENABLE) &&
2751 tg3_copper_is_advertising_all(tp,
2752 tp->link_config.advertising)) {
2753 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2754 &rmt_adv))
2755 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 }
2757 } else {
2758 if (!(bmcr & BMCR_ANENABLE) &&
2759 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002760 tp->link_config.duplex == current_duplex &&
2761 tp->link_config.flowctrl ==
2762 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 }
2765 }
2766
Matt Carlsonef167e22007-12-20 20:10:01 -08002767 if (current_link_up == 1 &&
2768 tp->link_config.active_duplex == DUPLEX_FULL)
2769 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 }
2771
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772relink:
Michael Chan6921d202005-12-13 21:15:53 -08002773 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 u32 tmp;
2775
2776 tg3_phy_copper_begin(tp);
2777
2778 tg3_readphy(tp, MII_BMSR, &tmp);
2779 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2780 (tmp & BMSR_LSTATUS))
2781 current_link_up = 1;
2782 }
2783
2784 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2785 if (current_link_up == 1) {
2786 if (tp->link_config.active_speed == SPEED_100 ||
2787 tp->link_config.active_speed == SPEED_10)
2788 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2789 else
2790 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2791 } else
2792 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2793
2794 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2795 if (tp->link_config.active_duplex == DUPLEX_HALF)
2796 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2797
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002799 if (current_link_up == 1 &&
2800 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002802 else
2803 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 }
2805
2806 /* ??? Without this setting Netgear GA302T PHY does not
2807 * ??? send/receive packets...
2808 */
2809 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2810 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2811 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2812 tw32_f(MAC_MI_MODE, tp->mi_mode);
2813 udelay(80);
2814 }
2815
2816 tw32_f(MAC_MODE, tp->mac_mode);
2817 udelay(40);
2818
2819 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2820 /* Polled via timer. */
2821 tw32_f(MAC_EVENT, 0);
2822 } else {
2823 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2824 }
2825 udelay(40);
2826
2827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2828 current_link_up == 1 &&
2829 tp->link_config.active_speed == SPEED_1000 &&
2830 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2831 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2832 udelay(120);
2833 tw32_f(MAC_STATUS,
2834 (MAC_STATUS_SYNC_CHANGED |
2835 MAC_STATUS_CFG_CHANGED));
2836 udelay(40);
2837 tg3_write_mem(tp,
2838 NIC_SRAM_FIRMWARE_MBOX,
2839 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2840 }
2841
2842 if (current_link_up != netif_carrier_ok(tp->dev)) {
2843 if (current_link_up)
2844 netif_carrier_on(tp->dev);
2845 else
2846 netif_carrier_off(tp->dev);
2847 tg3_link_report(tp);
2848 }
2849
2850 return 0;
2851}
2852
2853struct tg3_fiber_aneginfo {
2854 int state;
2855#define ANEG_STATE_UNKNOWN 0
2856#define ANEG_STATE_AN_ENABLE 1
2857#define ANEG_STATE_RESTART_INIT 2
2858#define ANEG_STATE_RESTART 3
2859#define ANEG_STATE_DISABLE_LINK_OK 4
2860#define ANEG_STATE_ABILITY_DETECT_INIT 5
2861#define ANEG_STATE_ABILITY_DETECT 6
2862#define ANEG_STATE_ACK_DETECT_INIT 7
2863#define ANEG_STATE_ACK_DETECT 8
2864#define ANEG_STATE_COMPLETE_ACK_INIT 9
2865#define ANEG_STATE_COMPLETE_ACK 10
2866#define ANEG_STATE_IDLE_DETECT_INIT 11
2867#define ANEG_STATE_IDLE_DETECT 12
2868#define ANEG_STATE_LINK_OK 13
2869#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2870#define ANEG_STATE_NEXT_PAGE_WAIT 15
2871
2872 u32 flags;
2873#define MR_AN_ENABLE 0x00000001
2874#define MR_RESTART_AN 0x00000002
2875#define MR_AN_COMPLETE 0x00000004
2876#define MR_PAGE_RX 0x00000008
2877#define MR_NP_LOADED 0x00000010
2878#define MR_TOGGLE_TX 0x00000020
2879#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2880#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2881#define MR_LP_ADV_SYM_PAUSE 0x00000100
2882#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2883#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2884#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2885#define MR_LP_ADV_NEXT_PAGE 0x00001000
2886#define MR_TOGGLE_RX 0x00002000
2887#define MR_NP_RX 0x00004000
2888
2889#define MR_LINK_OK 0x80000000
2890
2891 unsigned long link_time, cur_time;
2892
2893 u32 ability_match_cfg;
2894 int ability_match_count;
2895
2896 char ability_match, idle_match, ack_match;
2897
2898 u32 txconfig, rxconfig;
2899#define ANEG_CFG_NP 0x00000080
2900#define ANEG_CFG_ACK 0x00000040
2901#define ANEG_CFG_RF2 0x00000020
2902#define ANEG_CFG_RF1 0x00000010
2903#define ANEG_CFG_PS2 0x00000001
2904#define ANEG_CFG_PS1 0x00008000
2905#define ANEG_CFG_HD 0x00004000
2906#define ANEG_CFG_FD 0x00002000
2907#define ANEG_CFG_INVAL 0x00001f06
2908
2909};
2910#define ANEG_OK 0
2911#define ANEG_DONE 1
2912#define ANEG_TIMER_ENAB 2
2913#define ANEG_FAILED -1
2914
2915#define ANEG_STATE_SETTLE_TIME 10000
2916
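/* In-driver autonegotiation state machine for fiber (1000Base-X) links.
 * Runs one step per call and is driven to completion by fiber_autoneg().
 */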
2917static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2918 struct tg3_fiber_aneginfo *ap)
2919{
Matt Carlson5be73b42007-12-20 20:09:29 -08002920 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 unsigned long delta;
2922 u32 rx_cfg_reg;
2923 int ret;
2924
2925 if (ap->state == ANEG_STATE_UNKNOWN) {
2926 ap->rxconfig = 0;
2927 ap->link_time = 0;
2928 ap->cur_time = 0;
2929 ap->ability_match_cfg = 0;
2930 ap->ability_match_count = 0;
2931 ap->ability_match = 0;
2932 ap->idle_match = 0;
2933 ap->ack_match = 0;
2934 }
2935 ap->cur_time++;
2936
2937 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2938 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2939
2940 if (rx_cfg_reg != ap->ability_match_cfg) {
2941 ap->ability_match_cfg = rx_cfg_reg;
2942 ap->ability_match = 0;
2943 ap->ability_match_count = 0;
2944 } else {
2945 if (++ap->ability_match_count > 1) {
2946 ap->ability_match = 1;
2947 ap->ability_match_cfg = rx_cfg_reg;
2948 }
2949 }
2950 if (rx_cfg_reg & ANEG_CFG_ACK)
2951 ap->ack_match = 1;
2952 else
2953 ap->ack_match = 0;
2954
2955 ap->idle_match = 0;
2956 } else {
2957 ap->idle_match = 1;
2958 ap->ability_match_cfg = 0;
2959 ap->ability_match_count = 0;
2960 ap->ability_match = 0;
2961 ap->ack_match = 0;
2962
2963 rx_cfg_reg = 0;
2964 }
2965
2966 ap->rxconfig = rx_cfg_reg;
2967 ret = ANEG_OK;
2968
2969	switch (ap->state) {
2970 case ANEG_STATE_UNKNOWN:
2971 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2972 ap->state = ANEG_STATE_AN_ENABLE;
2973
2974 /* fallthru */
2975 case ANEG_STATE_AN_ENABLE:
2976 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2977 if (ap->flags & MR_AN_ENABLE) {
2978 ap->link_time = 0;
2979 ap->cur_time = 0;
2980 ap->ability_match_cfg = 0;
2981 ap->ability_match_count = 0;
2982 ap->ability_match = 0;
2983 ap->idle_match = 0;
2984 ap->ack_match = 0;
2985
2986 ap->state = ANEG_STATE_RESTART_INIT;
2987 } else {
2988 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2989 }
2990 break;
2991
2992 case ANEG_STATE_RESTART_INIT:
2993 ap->link_time = ap->cur_time;
2994 ap->flags &= ~(MR_NP_LOADED);
2995 ap->txconfig = 0;
2996 tw32(MAC_TX_AUTO_NEG, 0);
2997 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2998 tw32_f(MAC_MODE, tp->mac_mode);
2999 udelay(40);
3000
3001 ret = ANEG_TIMER_ENAB;
3002 ap->state = ANEG_STATE_RESTART;
3003
3004 /* fallthru */
3005 case ANEG_STATE_RESTART:
3006 delta = ap->cur_time - ap->link_time;
3007 if (delta > ANEG_STATE_SETTLE_TIME) {
3008 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3009 } else {
3010 ret = ANEG_TIMER_ENAB;
3011 }
3012 break;
3013
3014 case ANEG_STATE_DISABLE_LINK_OK:
3015 ret = ANEG_DONE;
3016 break;
3017
3018 case ANEG_STATE_ABILITY_DETECT_INIT:
3019 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003020 ap->txconfig = ANEG_CFG_FD;
3021 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3022 if (flowctrl & ADVERTISE_1000XPAUSE)
3023 ap->txconfig |= ANEG_CFG_PS1;
3024 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3025 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3027 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3028 tw32_f(MAC_MODE, tp->mac_mode);
3029 udelay(40);
3030
3031 ap->state = ANEG_STATE_ABILITY_DETECT;
3032 break;
3033
3034 case ANEG_STATE_ABILITY_DETECT:
3035 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3036 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3037 }
3038 break;
3039
3040 case ANEG_STATE_ACK_DETECT_INIT:
3041 ap->txconfig |= ANEG_CFG_ACK;
3042 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3043 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3044 tw32_f(MAC_MODE, tp->mac_mode);
3045 udelay(40);
3046
3047 ap->state = ANEG_STATE_ACK_DETECT;
3048
3049 /* fallthru */
3050 case ANEG_STATE_ACK_DETECT:
3051 if (ap->ack_match != 0) {
3052 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3053 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3054 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3055 } else {
3056 ap->state = ANEG_STATE_AN_ENABLE;
3057 }
3058 } else if (ap->ability_match != 0 &&
3059 ap->rxconfig == 0) {
3060 ap->state = ANEG_STATE_AN_ENABLE;
3061 }
3062 break;
3063
3064 case ANEG_STATE_COMPLETE_ACK_INIT:
3065 if (ap->rxconfig & ANEG_CFG_INVAL) {
3066 ret = ANEG_FAILED;
3067 break;
3068 }
3069 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3070 MR_LP_ADV_HALF_DUPLEX |
3071 MR_LP_ADV_SYM_PAUSE |
3072 MR_LP_ADV_ASYM_PAUSE |
3073 MR_LP_ADV_REMOTE_FAULT1 |
3074 MR_LP_ADV_REMOTE_FAULT2 |
3075 MR_LP_ADV_NEXT_PAGE |
3076 MR_TOGGLE_RX |
3077 MR_NP_RX);
3078 if (ap->rxconfig & ANEG_CFG_FD)
3079 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3080 if (ap->rxconfig & ANEG_CFG_HD)
3081 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3082 if (ap->rxconfig & ANEG_CFG_PS1)
3083 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3084 if (ap->rxconfig & ANEG_CFG_PS2)
3085 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3086 if (ap->rxconfig & ANEG_CFG_RF1)
3087 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3088 if (ap->rxconfig & ANEG_CFG_RF2)
3089 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3090 if (ap->rxconfig & ANEG_CFG_NP)
3091 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3092
3093 ap->link_time = ap->cur_time;
3094
3095 ap->flags ^= (MR_TOGGLE_TX);
3096 if (ap->rxconfig & 0x0008)
3097 ap->flags |= MR_TOGGLE_RX;
3098 if (ap->rxconfig & ANEG_CFG_NP)
3099 ap->flags |= MR_NP_RX;
3100 ap->flags |= MR_PAGE_RX;
3101
3102 ap->state = ANEG_STATE_COMPLETE_ACK;
3103 ret = ANEG_TIMER_ENAB;
3104 break;
3105
3106 case ANEG_STATE_COMPLETE_ACK:
3107 if (ap->ability_match != 0 &&
3108 ap->rxconfig == 0) {
3109 ap->state = ANEG_STATE_AN_ENABLE;
3110 break;
3111 }
3112 delta = ap->cur_time - ap->link_time;
3113 if (delta > ANEG_STATE_SETTLE_TIME) {
3114 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3115 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3116 } else {
3117 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3118 !(ap->flags & MR_NP_RX)) {
3119 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3120 } else {
3121 ret = ANEG_FAILED;
3122 }
3123 }
3124 }
3125 break;
3126
3127 case ANEG_STATE_IDLE_DETECT_INIT:
3128 ap->link_time = ap->cur_time;
3129 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3130 tw32_f(MAC_MODE, tp->mac_mode);
3131 udelay(40);
3132
3133 ap->state = ANEG_STATE_IDLE_DETECT;
3134 ret = ANEG_TIMER_ENAB;
3135 break;
3136
3137 case ANEG_STATE_IDLE_DETECT:
3138 if (ap->ability_match != 0 &&
3139 ap->rxconfig == 0) {
3140 ap->state = ANEG_STATE_AN_ENABLE;
3141 break;
3142 }
3143 delta = ap->cur_time - ap->link_time;
3144 if (delta > ANEG_STATE_SETTLE_TIME) {
3145 /* XXX another gem from the Broadcom driver :( */
3146 ap->state = ANEG_STATE_LINK_OK;
3147 }
3148 break;
3149
3150 case ANEG_STATE_LINK_OK:
3151 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3152 ret = ANEG_DONE;
3153 break;
3154
3155 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3156 /* ??? unimplemented */
3157 break;
3158
3159 case ANEG_STATE_NEXT_PAGE_WAIT:
3160 /* ??? unimplemented */
3161 break;
3162
3163 default:
3164 ret = ANEG_FAILED;
3165 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
3168 return ret;
3169}
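/* Illustrative note on the return codes: fiber_autoneg() below keeps
 * polling the state machine while non-terminal values such as
 * ANEG_TIMER_ENAB are returned, and stops only on ANEG_DONE or
 * ANEG_FAILED.
 */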
3170
Matt Carlson5be73b42007-12-20 20:09:29 -08003171static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172{
3173 int res = 0;
3174 struct tg3_fiber_aneginfo aninfo;
3175 int status = ANEG_FAILED;
3176 unsigned int tick;
3177 u32 tmp;
3178
3179 tw32_f(MAC_TX_AUTO_NEG, 0);
3180
3181 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3182 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3183 udelay(40);
3184
3185 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3186 udelay(40);
3187
3188 memset(&aninfo, 0, sizeof(aninfo));
3189 aninfo.flags |= MR_AN_ENABLE;
3190 aninfo.state = ANEG_STATE_UNKNOWN;
3191 aninfo.cur_time = 0;
3192 tick = 0;
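	/* The polling loop below runs the software autoneg state machine
	 * once per microsecond for up to 195000 ticks, i.e. it gives the
	 * negotiation roughly 195 ms to reach ANEG_DONE or ANEG_FAILED.
	 */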
3193 while (++tick < 195000) {
3194 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3195 if (status == ANEG_DONE || status == ANEG_FAILED)
3196 break;
3197
3198 udelay(1);
3199 }
3200
3201 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3202 tw32_f(MAC_MODE, tp->mac_mode);
3203 udelay(40);
3204
Matt Carlson5be73b42007-12-20 20:09:29 -08003205 *txflags = aninfo.txconfig;
3206 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
3208 if (status == ANEG_DONE &&
3209 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3210 MR_LP_ADV_FULL_DUPLEX)))
3211 res = 1;
3212
3213 return res;
3214}
3215
3216static void tg3_init_bcm8002(struct tg3 *tp)
3217{
3218 u32 mac_status = tr32(MAC_STATUS);
3219 int i;
3220
3221 	/* Reset when initializing for the first time or when we have a link. */
3222 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3223 !(mac_status & MAC_STATUS_PCS_SYNCED))
3224 return;
3225
3226 /* Set PLL lock range. */
3227 tg3_writephy(tp, 0x16, 0x8007);
3228
3229 /* SW reset */
3230 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3231
3232 /* Wait for reset to complete. */
3233 /* XXX schedule_timeout() ... */
3234 for (i = 0; i < 500; i++)
3235 udelay(10);
3236
3237 /* Config mode; select PMA/Ch 1 regs. */
3238 tg3_writephy(tp, 0x10, 0x8411);
3239
3240 /* Enable auto-lock and comdet, select txclk for tx. */
3241 tg3_writephy(tp, 0x11, 0x0a10);
3242
3243 tg3_writephy(tp, 0x18, 0x00a0);
3244 tg3_writephy(tp, 0x16, 0x41ff);
3245
3246 /* Assert and deassert POR. */
3247 tg3_writephy(tp, 0x13, 0x0400);
3248 udelay(40);
3249 tg3_writephy(tp, 0x13, 0x0000);
3250
3251 tg3_writephy(tp, 0x11, 0x0a50);
3252 udelay(40);
3253 tg3_writephy(tp, 0x11, 0x0a10);
3254
3255 /* Wait for signal to stabilize */
3256 /* XXX schedule_timeout() ... */
3257 for (i = 0; i < 15000; i++)
3258 udelay(10);
3259
3260 /* Deselect the channel register so we can read the PHYID
3261 * later.
3262 */
3263 tg3_writephy(tp, 0x10, 0x8011);
3264}
3265
3266static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3267{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003268 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269 u32 sg_dig_ctrl, sg_dig_status;
3270 u32 serdes_cfg, expected_sg_dig_ctrl;
3271 int workaround, port_a;
3272 int current_link_up;
3273
3274 serdes_cfg = 0;
3275 expected_sg_dig_ctrl = 0;
3276 workaround = 0;
3277 port_a = 1;
3278 current_link_up = 0;
3279
3280 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3281 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3282 workaround = 1;
3283 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3284 port_a = 0;
3285
3286 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3287 /* preserve bits 20-23 for voltage regulator */
3288 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3289 }
3290
3291 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3292
3293 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003294 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 if (workaround) {
3296 u32 val = serdes_cfg;
3297
3298 if (port_a)
3299 val |= 0xc010000;
3300 else
3301 val |= 0x4010000;
3302 tw32_f(MAC_SERDES_CFG, val);
3303 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003304
3305 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 }
3307 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3308 tg3_setup_flow_control(tp, 0, 0);
3309 current_link_up = 1;
3310 }
3311 goto out;
3312 }
3313
3314 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003315 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316
Matt Carlson82cd3d12007-12-20 20:09:00 -08003317 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3318 if (flowctrl & ADVERTISE_1000XPAUSE)
3319 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3320 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3321 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322
3323 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003324 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3325 tp->serdes_counter &&
3326 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3327 MAC_STATUS_RCVD_CFG)) ==
3328 MAC_STATUS_PCS_SYNCED)) {
3329 tp->serdes_counter--;
3330 current_link_up = 1;
3331 goto out;
3332 }
3333restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 if (workaround)
3335 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003336 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 udelay(5);
3338 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3339
Michael Chan3d3ebe72006-09-27 15:59:15 -07003340 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3341 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3343 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003344 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 mac_status = tr32(MAC_STATUS);
3346
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003347 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003349 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350
Matt Carlson82cd3d12007-12-20 20:09:00 -08003351 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3352 local_adv |= ADVERTISE_1000XPAUSE;
3353 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3354 local_adv |= ADVERTISE_1000XPSE_ASYM;
3355
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003356 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003357 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003358 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003359 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360
3361 tg3_setup_flow_control(tp, local_adv, remote_adv);
3362 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003363 tp->serdes_counter = 0;
3364 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003365 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003366 if (tp->serdes_counter)
3367 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 else {
3369 if (workaround) {
3370 u32 val = serdes_cfg;
3371
3372 if (port_a)
3373 val |= 0xc010000;
3374 else
3375 val |= 0x4010000;
3376
3377 tw32_f(MAC_SERDES_CFG, val);
3378 }
3379
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003380 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 udelay(40);
3382
3383 				/* Link parallel detection: the link is up
3384 				 * only if we have PCS_SYNC and are not
3385 				 * receiving config code words. */
3386 mac_status = tr32(MAC_STATUS);
3387 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3388 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3389 tg3_setup_flow_control(tp, 0, 0);
3390 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003391 tp->tg3_flags2 |=
3392 TG3_FLG2_PARALLEL_DETECT;
3393 tp->serdes_counter =
3394 SERDES_PARALLEL_DET_TIMEOUT;
3395 } else
3396 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 }
3398 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003399 } else {
3400 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3401 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 }
3403
3404out:
3405 return current_link_up;
3406}
3407
3408static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3409{
3410 int current_link_up = 0;
3411
Michael Chan5cf64b82007-05-05 12:11:21 -07003412 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414
3415 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003416 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003418
Matt Carlson5be73b42007-12-20 20:09:29 -08003419 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3420 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421
Matt Carlson5be73b42007-12-20 20:09:29 -08003422 if (txflags & ANEG_CFG_PS1)
3423 local_adv |= ADVERTISE_1000XPAUSE;
3424 if (txflags & ANEG_CFG_PS2)
3425 local_adv |= ADVERTISE_1000XPSE_ASYM;
3426
3427 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3428 remote_adv |= LPA_1000XPAUSE;
3429 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3430 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431
3432 tg3_setup_flow_control(tp, local_adv, remote_adv);
3433
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 current_link_up = 1;
3435 }
3436 for (i = 0; i < 30; i++) {
3437 udelay(20);
3438 tw32_f(MAC_STATUS,
3439 (MAC_STATUS_SYNC_CHANGED |
3440 MAC_STATUS_CFG_CHANGED));
3441 udelay(40);
3442 if ((tr32(MAC_STATUS) &
3443 (MAC_STATUS_SYNC_CHANGED |
3444 MAC_STATUS_CFG_CHANGED)) == 0)
3445 break;
3446 }
3447
3448 mac_status = tr32(MAC_STATUS);
3449 if (current_link_up == 0 &&
3450 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3451 !(mac_status & MAC_STATUS_RCVD_CFG))
3452 current_link_up = 1;
3453 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003454 tg3_setup_flow_control(tp, 0, 0);
3455
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456 /* Forcing 1000FD link up. */
3457 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458
3459 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3460 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003461
3462 tw32_f(MAC_MODE, tp->mac_mode);
3463 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464 }
3465
3466out:
3467 return current_link_up;
3468}
3469
3470static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3471{
3472 u32 orig_pause_cfg;
3473 u16 orig_active_speed;
3474 u8 orig_active_duplex;
3475 u32 mac_status;
3476 int current_link_up;
3477 int i;
3478
Matt Carlson8d018622007-12-20 20:05:44 -08003479 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480 orig_active_speed = tp->link_config.active_speed;
3481 orig_active_duplex = tp->link_config.active_duplex;
3482
3483 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3484 netif_carrier_ok(tp->dev) &&
3485 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3486 mac_status = tr32(MAC_STATUS);
3487 mac_status &= (MAC_STATUS_PCS_SYNCED |
3488 MAC_STATUS_SIGNAL_DET |
3489 MAC_STATUS_CFG_CHANGED |
3490 MAC_STATUS_RCVD_CFG);
3491 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3492 MAC_STATUS_SIGNAL_DET)) {
3493 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3494 MAC_STATUS_CFG_CHANGED));
3495 return 0;
3496 }
3497 }
3498
3499 tw32_f(MAC_TX_AUTO_NEG, 0);
3500
3501 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3502 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3503 tw32_f(MAC_MODE, tp->mac_mode);
3504 udelay(40);
3505
3506 if (tp->phy_id == PHY_ID_BCM8002)
3507 tg3_init_bcm8002(tp);
3508
3509 /* Enable link change event even when serdes polling. */
3510 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3511 udelay(40);
3512
3513 current_link_up = 0;
3514 mac_status = tr32(MAC_STATUS);
3515
3516 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3517 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3518 else
3519 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3520
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521 tp->hw_status->status =
3522 (SD_STATUS_UPDATED |
3523 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3524
3525 for (i = 0; i < 100; i++) {
3526 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3527 MAC_STATUS_CFG_CHANGED));
3528 udelay(5);
3529 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003530 MAC_STATUS_CFG_CHANGED |
3531 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 break;
3533 }
3534
3535 mac_status = tr32(MAC_STATUS);
3536 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3537 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003538 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3539 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 tw32_f(MAC_MODE, (tp->mac_mode |
3541 MAC_MODE_SEND_CONFIGS));
3542 udelay(1);
3543 tw32_f(MAC_MODE, tp->mac_mode);
3544 }
3545 }
3546
3547 if (current_link_up == 1) {
3548 tp->link_config.active_speed = SPEED_1000;
3549 tp->link_config.active_duplex = DUPLEX_FULL;
3550 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3551 LED_CTRL_LNKLED_OVERRIDE |
3552 LED_CTRL_1000MBPS_ON));
3553 } else {
3554 tp->link_config.active_speed = SPEED_INVALID;
3555 tp->link_config.active_duplex = DUPLEX_INVALID;
3556 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3557 LED_CTRL_LNKLED_OVERRIDE |
3558 LED_CTRL_TRAFFIC_OVERRIDE));
3559 }
3560
3561 if (current_link_up != netif_carrier_ok(tp->dev)) {
3562 if (current_link_up)
3563 netif_carrier_on(tp->dev);
3564 else
3565 netif_carrier_off(tp->dev);
3566 tg3_link_report(tp);
3567 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003568 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 if (orig_pause_cfg != now_pause_cfg ||
3570 orig_active_speed != tp->link_config.active_speed ||
3571 orig_active_duplex != tp->link_config.active_duplex)
3572 tg3_link_report(tp);
3573 }
3574
3575 return 0;
3576}
3577
Michael Chan747e8f82005-07-25 12:33:22 -07003578static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3579{
3580 int current_link_up, err = 0;
3581 u32 bmsr, bmcr;
3582 u16 current_speed;
3583 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003584 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003585
3586 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3587 tw32_f(MAC_MODE, tp->mac_mode);
3588 udelay(40);
3589
3590 tw32(MAC_EVENT, 0);
3591
3592 tw32_f(MAC_STATUS,
3593 (MAC_STATUS_SYNC_CHANGED |
3594 MAC_STATUS_CFG_CHANGED |
3595 MAC_STATUS_MI_COMPLETION |
3596 MAC_STATUS_LNKSTATE_CHANGED));
3597 udelay(40);
3598
3599 if (force_reset)
3600 tg3_phy_reset(tp);
3601
3602 current_link_up = 0;
3603 current_speed = SPEED_INVALID;
3604 current_duplex = DUPLEX_INVALID;
3605
3606 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3607 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3609 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3610 bmsr |= BMSR_LSTATUS;
3611 else
3612 bmsr &= ~BMSR_LSTATUS;
3613 }
Michael Chan747e8f82005-07-25 12:33:22 -07003614
3615 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3616
3617 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003618 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003619 /* do nothing, just check for link up at the end */
3620 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3621 u32 adv, new_adv;
3622
3623 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3624 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3625 ADVERTISE_1000XPAUSE |
3626 ADVERTISE_1000XPSE_ASYM |
3627 ADVERTISE_SLCT);
3628
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003629 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003630
3631 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3632 new_adv |= ADVERTISE_1000XHALF;
3633 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3634 new_adv |= ADVERTISE_1000XFULL;
3635
3636 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3637 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3638 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3639 tg3_writephy(tp, MII_BMCR, bmcr);
3640
3641 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003642 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003643 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3644
3645 return err;
3646 }
3647 } else {
3648 u32 new_bmcr;
3649
3650 bmcr &= ~BMCR_SPEED1000;
3651 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3652
3653 if (tp->link_config.duplex == DUPLEX_FULL)
3654 new_bmcr |= BMCR_FULLDPLX;
3655
3656 if (new_bmcr != bmcr) {
3657 /* BMCR_SPEED1000 is a reserved bit that needs
3658 * to be set on write.
3659 */
3660 new_bmcr |= BMCR_SPEED1000;
3661
3662 /* Force a linkdown */
3663 if (netif_carrier_ok(tp->dev)) {
3664 u32 adv;
3665
3666 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3667 adv &= ~(ADVERTISE_1000XFULL |
3668 ADVERTISE_1000XHALF |
3669 ADVERTISE_SLCT);
3670 tg3_writephy(tp, MII_ADVERTISE, adv);
3671 tg3_writephy(tp, MII_BMCR, bmcr |
3672 BMCR_ANRESTART |
3673 BMCR_ANENABLE);
3674 udelay(10);
3675 netif_carrier_off(tp->dev);
3676 }
3677 tg3_writephy(tp, MII_BMCR, new_bmcr);
3678 bmcr = new_bmcr;
3679 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3680 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003681 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3682 ASIC_REV_5714) {
3683 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3684 bmsr |= BMSR_LSTATUS;
3685 else
3686 bmsr &= ~BMSR_LSTATUS;
3687 }
Michael Chan747e8f82005-07-25 12:33:22 -07003688 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3689 }
3690 }
3691
3692 if (bmsr & BMSR_LSTATUS) {
3693 current_speed = SPEED_1000;
3694 current_link_up = 1;
3695 if (bmcr & BMCR_FULLDPLX)
3696 current_duplex = DUPLEX_FULL;
3697 else
3698 current_duplex = DUPLEX_HALF;
3699
Matt Carlsonef167e22007-12-20 20:10:01 -08003700 local_adv = 0;
3701 remote_adv = 0;
3702
Michael Chan747e8f82005-07-25 12:33:22 -07003703 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003704 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003705
3706 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3707 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3708 common = local_adv & remote_adv;
3709 if (common & (ADVERTISE_1000XHALF |
3710 ADVERTISE_1000XFULL)) {
3711 if (common & ADVERTISE_1000XFULL)
3712 current_duplex = DUPLEX_FULL;
3713 else
3714 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003715 }
3716 else
3717 current_link_up = 0;
3718 }
3719 }
3720
Matt Carlsonef167e22007-12-20 20:10:01 -08003721 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3722 tg3_setup_flow_control(tp, local_adv, remote_adv);
3723
Michael Chan747e8f82005-07-25 12:33:22 -07003724 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3725 if (tp->link_config.active_duplex == DUPLEX_HALF)
3726 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3727
3728 tw32_f(MAC_MODE, tp->mac_mode);
3729 udelay(40);
3730
3731 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3732
3733 tp->link_config.active_speed = current_speed;
3734 tp->link_config.active_duplex = current_duplex;
3735
3736 if (current_link_up != netif_carrier_ok(tp->dev)) {
3737 if (current_link_up)
3738 netif_carrier_on(tp->dev);
3739 else {
3740 netif_carrier_off(tp->dev);
3741 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3742 }
3743 tg3_link_report(tp);
3744 }
3745 return err;
3746}
3747
3748static void tg3_serdes_parallel_detect(struct tg3 *tp)
3749{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003750 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003751 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003752 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003753 return;
3754 }
3755 if (!netif_carrier_ok(tp->dev) &&
3756 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3757 u32 bmcr;
3758
3759 tg3_readphy(tp, MII_BMCR, &bmcr);
3760 if (bmcr & BMCR_ANENABLE) {
3761 u32 phy1, phy2;
3762
3763 /* Select shadow register 0x1f */
3764 tg3_writephy(tp, 0x1c, 0x7c00);
3765 tg3_readphy(tp, 0x1c, &phy1);
3766
3767 /* Select expansion interrupt status register */
3768 tg3_writephy(tp, 0x17, 0x0f01);
3769 tg3_readphy(tp, 0x15, &phy2);
3770 tg3_readphy(tp, 0x15, &phy2);
3771
3772 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3773 				/* We have signal detect and are not receiving
3774 				 * config code words, so the link is up by
3775 				 * parallel detection.
3776 */
3777
3778 bmcr &= ~BMCR_ANENABLE;
3779 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3780 tg3_writephy(tp, MII_BMCR, bmcr);
3781 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3782 }
3783 }
3784 }
3785 else if (netif_carrier_ok(tp->dev) &&
3786 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3787 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3788 u32 phy2;
3789
3790 /* Select expansion interrupt status register */
3791 tg3_writephy(tp, 0x17, 0x0f01);
3792 tg3_readphy(tp, 0x15, &phy2);
3793 if (phy2 & 0x20) {
3794 u32 bmcr;
3795
3796 /* Config code words received, turn on autoneg. */
3797 tg3_readphy(tp, MII_BMCR, &bmcr);
3798 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3799
3800 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3801
3802 }
3803 }
3804}
3805
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3807{
3808 int err;
3809
3810 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3811 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003812 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3813 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 } else {
3815 err = tg3_setup_copper_phy(tp, force_reset);
3816 }
3817
Matt Carlsonb5af7122007-11-12 21:22:02 -08003818 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3819 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003820 u32 val, scale;
3821
3822 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3823 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3824 scale = 65;
3825 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3826 scale = 6;
3827 else
3828 scale = 12;
3829
3830 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3831 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3832 tw32(GRC_MISC_CFG, val);
3833 }
3834
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 if (tp->link_config.active_speed == SPEED_1000 &&
3836 tp->link_config.active_duplex == DUPLEX_HALF)
3837 tw32(MAC_TX_LENGTHS,
3838 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3839 (6 << TX_LENGTHS_IPG_SHIFT) |
3840 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3841 else
3842 tw32(MAC_TX_LENGTHS,
3843 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3844 (6 << TX_LENGTHS_IPG_SHIFT) |
3845 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3846
3847 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3848 if (netif_carrier_ok(tp->dev)) {
3849 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003850 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851 } else {
3852 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3853 }
3854 }
3855
Matt Carlson8ed5d972007-05-07 00:25:49 -07003856 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3857 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3858 if (!netif_carrier_ok(tp->dev))
3859 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3860 tp->pwrmgmt_thresh;
3861 else
3862 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3863 tw32(PCIE_PWR_MGMT_THRESH, val);
3864 }
3865
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866 return err;
3867}
3868
Michael Chandf3e6542006-05-26 17:48:07 -07003869/* This is called whenever we suspect that the system chipset is re-
3870 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3871 * is bogus tx completions. We try to recover by setting the
3872 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3873 * in the workqueue.
3874 */
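/* The actual recovery happens in tg3_reset_task() below: when it sees
 * TG3_FLAG_TX_RECOVERY_PENDING it switches the mailbox write routines
 * over, sets TG3_FLAG_MBOX_WRITE_REORDER and reinitializes the
 * hardware.
 */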
3875static void tg3_tx_recover(struct tg3 *tp)
3876{
3877 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3878 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3879
3880 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3881 "mapped I/O cycles to the network device, attempting to "
3882 "recover. Please report the problem to the driver maintainer "
3883 "and include system chipset information.\n", tp->dev->name);
3884
3885 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003886 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003887 spin_unlock(&tp->lock);
3888}
3889
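/* Illustrative example for the helper below, assuming TG3_TX_RING_SIZE
 * is 512: with tx_prod = 5 and tx_cons = 510 the masked difference
 * (5 - 510) & 511 = 7 descriptors are still in flight, so
 * tg3_tx_avail() returns tp->tx_pending - 7.
 */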
Michael Chan1b2a7202006-08-07 21:46:02 -07003890static inline u32 tg3_tx_avail(struct tg3 *tp)
3891{
3892 smp_mb();
3893 return (tp->tx_pending -
3894 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3895}
3896
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897/* Tigon3 never reports partial packet sends. So we do not
3898 * need special logic to handle SKBs that have not had all
3899 * of their frags sent yet, like SunGEM does.
3900 */
3901static void tg3_tx(struct tg3 *tp)
3902{
3903 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3904 u32 sw_idx = tp->tx_cons;
3905
3906 while (sw_idx != hw_idx) {
3907 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3908 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003909 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910
Michael Chandf3e6542006-05-26 17:48:07 -07003911 if (unlikely(skb == NULL)) {
3912 tg3_tx_recover(tp);
3913 return;
3914 }
3915
David S. Miller90079ce2008-09-11 04:52:51 -07003916 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917
3918 ri->skb = NULL;
3919
3920 sw_idx = NEXT_TX(sw_idx);
3921
3922 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003924 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3925 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 sw_idx = NEXT_TX(sw_idx);
3927 }
3928
David S. Millerf47c11e2005-06-24 20:18:35 -07003929 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003930
3931 if (unlikely(tx_bug)) {
3932 tg3_tx_recover(tp);
3933 return;
3934 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 }
3936
3937 tp->tx_cons = sw_idx;
3938
Michael Chan1b2a7202006-08-07 21:46:02 -07003939 /* Need to make the tx_cons update visible to tg3_start_xmit()
3940 * before checking for netif_queue_stopped(). Without the
3941 * memory barrier, there is a small possibility that tg3_start_xmit()
3942 * will miss it and cause the queue to be stopped forever.
3943 */
3944 smp_mb();
3945
3946 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003947 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07003948 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003949 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003950 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07003951 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07003952 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003953 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954}
3955
3956/* Returns size of skb allocated or < 0 on error.
3957 *
3958 * We only need to fill in the address because the other members
3959 * of the RX descriptor are invariant, see tg3_init_rings.
3960 *
3961  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3962 * posting buffers we only dirty the first cache line of the RX
3963 * descriptor (containing the address). Whereas for the RX status
3964  * buffers the cpu only reads the last cache line of the RX descriptor
3965 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3966 */
3967static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3968 int src_idx, u32 dest_idx_unmasked)
3969{
3970 struct tg3_rx_buffer_desc *desc;
3971 struct ring_info *map, *src_map;
3972 struct sk_buff *skb;
3973 dma_addr_t mapping;
3974 int skb_size, dest_idx;
3975
3976 src_map = NULL;
3977 switch (opaque_key) {
3978 case RXD_OPAQUE_RING_STD:
3979 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3980 desc = &tp->rx_std[dest_idx];
3981 map = &tp->rx_std_buffers[dest_idx];
3982 if (src_idx >= 0)
3983 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07003984 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 break;
3986
3987 case RXD_OPAQUE_RING_JUMBO:
3988 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3989 desc = &tp->rx_jumbo[dest_idx];
3990 map = &tp->rx_jumbo_buffers[dest_idx];
3991 if (src_idx >= 0)
3992 src_map = &tp->rx_jumbo_buffers[src_idx];
3993 skb_size = RX_JUMBO_PKT_BUF_SZ;
3994 break;
3995
3996 default:
3997 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003999
4000 /* Do not overwrite any of the map or rp information
4001 * until we are sure we can commit to a new buffer.
4002 *
4003 * Callers depend upon this behavior and assume that
4004 * we leave everything unchanged if we fail.
4005 */
David S. Millera20e9c62006-07-31 22:38:16 -07004006 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 if (skb == NULL)
4008 return -ENOMEM;
4009
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010 skb_reserve(skb, tp->rx_offset);
4011
4012 mapping = pci_map_single(tp->pdev, skb->data,
4013 skb_size - tp->rx_offset,
4014 PCI_DMA_FROMDEVICE);
4015
4016 map->skb = skb;
4017 pci_unmap_addr_set(map, mapping, mapping);
4018
4019 if (src_map != NULL)
4020 src_map->skb = NULL;
4021
4022 desc->addr_hi = ((u64)mapping >> 32);
4023 desc->addr_lo = ((u64)mapping & 0xffffffff);
4024
4025 return skb_size;
4026}
4027
4028/* We only need to copy the address over because the other
4029 * members of the RX descriptor are invariant. See notes above
4030 * tg3_alloc_rx_skb for full details.
4031 */
4032static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4033 int src_idx, u32 dest_idx_unmasked)
4034{
4035 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4036 struct ring_info *src_map, *dest_map;
4037 int dest_idx;
4038
4039 switch (opaque_key) {
4040 case RXD_OPAQUE_RING_STD:
4041 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4042 dest_desc = &tp->rx_std[dest_idx];
4043 dest_map = &tp->rx_std_buffers[dest_idx];
4044 src_desc = &tp->rx_std[src_idx];
4045 src_map = &tp->rx_std_buffers[src_idx];
4046 break;
4047
4048 case RXD_OPAQUE_RING_JUMBO:
4049 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4050 dest_desc = &tp->rx_jumbo[dest_idx];
4051 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4052 src_desc = &tp->rx_jumbo[src_idx];
4053 src_map = &tp->rx_jumbo_buffers[src_idx];
4054 break;
4055
4056 default:
4057 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004058 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059
4060 dest_map->skb = src_map->skb;
4061 pci_unmap_addr_set(dest_map, mapping,
4062 pci_unmap_addr(src_map, mapping));
4063 dest_desc->addr_hi = src_desc->addr_hi;
4064 dest_desc->addr_lo = src_desc->addr_lo;
4065
4066 src_map->skb = NULL;
4067}
4068
4069#if TG3_VLAN_TAG_USED
4070static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4071{
4072 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4073}
4074#endif
4075
4076/* The RX ring scheme is composed of multiple rings which post fresh
4077 * buffers to the chip, and one special ring the chip uses to report
4078 * status back to the host.
4079 *
4080 * The special ring reports the status of received packets to the
4081 * host. The chip does not write into the original descriptor the
4082 * RX buffer was obtained from. The chip simply takes the original
4083 * descriptor as provided by the host, updates the status and length
4084 * field, then writes this into the next status ring entry.
4085 *
4086 * Each ring the host uses to post buffers to the chip is described
4087  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4088  * it is first placed into the on-chip RAM.  When the packet's length
4089 * is known, it walks down the TG3_BDINFO entries to select the ring.
4090 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4091  * whose MAXLEN accommodates the new packet's length is chosen.
4092 *
4093 * The "separate ring for rx status" scheme may sound queer, but it makes
4094 * sense from a cache coherency perspective. If only the host writes
4095 * to the buffer post rings, and only the chip writes to the rx status
4096 * rings, then cache lines never move beyond shared-modified state.
4097 * If both the host and chip were to write into the same ring, cache line
4098 * eviction could occur since both entities want it in an exclusive state.
4099 */
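/* Worked example of the selection described above (sizes are
 * illustrative): a 1400-byte frame falls within the standard ring's
 * MAXLEN and is reported against a standard-ring buffer, while a
 * 6000-byte frame only fits the jumbo ring sized for
 * RX_JUMBO_PKT_BUF_SZ and is reported against a jumbo-ring buffer.
 */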
4100static int tg3_rx(struct tg3 *tp, int budget)
4101{
Michael Chanf92905d2006-06-29 20:14:29 -07004102 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004103 u32 sw_idx = tp->rx_rcb_ptr;
4104 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 int received;
4106
4107 hw_idx = tp->hw_status->idx[0].rx_producer;
4108 /*
4109 * We need to order the read of hw_idx and the read of
4110 * the opaque cookie.
4111 */
4112 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113 work_mask = 0;
4114 received = 0;
4115 while (sw_idx != hw_idx && budget > 0) {
4116 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4117 unsigned int len;
4118 struct sk_buff *skb;
4119 dma_addr_t dma_addr;
4120 u32 opaque_key, desc_idx, *post_ptr;
4121
4122 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4123 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4124 if (opaque_key == RXD_OPAQUE_RING_STD) {
4125 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4126 mapping);
4127 skb = tp->rx_std_buffers[desc_idx].skb;
4128 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004129 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4131 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4132 mapping);
4133 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4134 post_ptr = &tp->rx_jumbo_ptr;
4135 }
4136 else {
4137 goto next_pkt_nopost;
4138 }
4139
4140 work_mask |= opaque_key;
4141
4142 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4143 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4144 drop_it:
4145 tg3_recycle_rx(tp, opaque_key,
4146 desc_idx, *post_ptr);
4147 drop_it_no_recycle:
4148 			/* The card keeps track of the other statistics. */
4149 tp->net_stats.rx_dropped++;
4150 goto next_pkt;
4151 }
4152
4153 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4154
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004155 if (len > RX_COPY_THRESHOLD
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156 && tp->rx_offset == 2
4157 /* rx_offset != 2 iff this is a 5701 card running
4158 * in PCI-X mode [see tg3_get_invariants()] */
4159 ) {
4160 int skb_size;
4161
4162 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4163 desc_idx, *post_ptr);
4164 if (skb_size < 0)
4165 goto drop_it;
4166
4167 pci_unmap_single(tp->pdev, dma_addr,
4168 skb_size - tp->rx_offset,
4169 PCI_DMA_FROMDEVICE);
4170
4171 skb_put(skb, len);
4172 } else {
4173 struct sk_buff *copy_skb;
4174
4175 tg3_recycle_rx(tp, opaque_key,
4176 desc_idx, *post_ptr);
4177
David S. Millera20e9c62006-07-31 22:38:16 -07004178 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 if (copy_skb == NULL)
4180 goto drop_it_no_recycle;
4181
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 skb_reserve(copy_skb, 2);
4183 skb_put(copy_skb, len);
4184 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004185 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4187
4188 /* We'll reuse the original ring buffer. */
4189 skb = copy_skb;
4190 }
4191
4192 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4193 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4194 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4195 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4196 skb->ip_summed = CHECKSUM_UNNECESSARY;
4197 else
4198 skb->ip_summed = CHECKSUM_NONE;
4199
4200 skb->protocol = eth_type_trans(skb, tp->dev);
4201#if TG3_VLAN_TAG_USED
4202 if (tp->vlgrp != NULL &&
4203 desc->type_flags & RXD_FLAG_VLAN) {
4204 tg3_vlan_rx(tp, skb,
4205 desc->err_vlan & RXD_VLAN_MASK);
4206 } else
4207#endif
4208 netif_receive_skb(skb);
4209
4210 tp->dev->last_rx = jiffies;
4211 received++;
4212 budget--;
4213
4214next_pkt:
4215 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004216
4217 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4218 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4219
4220 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4221 TG3_64BIT_REG_LOW, idx);
4222 work_mask &= ~RXD_OPAQUE_RING_STD;
4223 rx_std_posted = 0;
4224 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004226 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004227 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004228
4229 /* Refresh hw_idx to see if there is new work */
4230 if (sw_idx == hw_idx) {
4231 hw_idx = tp->hw_status->idx[0].rx_producer;
4232 rmb();
4233 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 }
4235
4236 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004237 tp->rx_rcb_ptr = sw_idx;
4238 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
4240 /* Refill RX ring(s). */
4241 if (work_mask & RXD_OPAQUE_RING_STD) {
4242 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4243 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4244 sw_idx);
4245 }
4246 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4247 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4248 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4249 sw_idx);
4250 }
4251 mmiowb();
4252
4253 return received;
4254}
4255
David S. Miller6f535762007-10-11 18:08:29 -07004256static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260 /* handle link change and other phy events */
4261 if (!(tp->tg3_flags &
4262 (TG3_FLAG_USE_LINKCHG_REG |
4263 TG3_FLAG_POLL_SERDES))) {
4264 if (sblk->status & SD_STATUS_LINK_CHG) {
4265 sblk->status = SD_STATUS_UPDATED |
4266 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004267 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004268 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4269 tw32_f(MAC_STATUS,
4270 (MAC_STATUS_SYNC_CHANGED |
4271 MAC_STATUS_CFG_CHANGED |
4272 MAC_STATUS_MI_COMPLETION |
4273 MAC_STATUS_LNKSTATE_CHANGED));
4274 udelay(40);
4275 } else
4276 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004277 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278 }
4279 }
4280
4281 /* run TX completion thread */
4282 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004284 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004285 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 }
4287
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288 /* run RX thread, within the bounds set by NAPI.
4289 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004290 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004292 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004293 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294
David S. Miller6f535762007-10-11 18:08:29 -07004295 return work_done;
4296}
David S. Millerf7383c22005-05-18 22:50:53 -07004297
David S. Miller6f535762007-10-11 18:08:29 -07004298static int tg3_poll(struct napi_struct *napi, int budget)
4299{
4300 struct tg3 *tp = container_of(napi, struct tg3, napi);
4301 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004302 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004303
4304 while (1) {
4305 work_done = tg3_poll_work(tp, work_done, budget);
4306
4307 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4308 goto tx_recovery;
4309
4310 if (unlikely(work_done >= budget))
4311 break;
4312
Michael Chan4fd7ab52007-10-12 01:39:50 -07004313 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4314 /* tp->last_tag is used in tg3_restart_ints() below
4315 * to tell the hw how much work has been processed,
4316 * so we must read it before checking for more work.
4317 */
4318 tp->last_tag = sblk->status_tag;
4319 rmb();
4320 } else
4321 sblk->status &= ~SD_STATUS_UPDATED;
4322
David S. Miller6f535762007-10-11 18:08:29 -07004323 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004324 netif_rx_complete(tp->dev, napi);
4325 tg3_restart_ints(tp);
4326 break;
4327 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 }
4329
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004330 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004331
4332tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004333 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004334 netif_rx_complete(tp->dev, napi);
4335 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004336 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337}
4338
David S. Millerf47c11e2005-06-24 20:18:35 -07004339static void tg3_irq_quiesce(struct tg3 *tp)
4340{
4341 BUG_ON(tp->irq_sync);
4342
4343 tp->irq_sync = 1;
4344 smp_mb();
4345
4346 synchronize_irq(tp->pdev->irq);
4347}
4348
4349static inline int tg3_irq_sync(struct tg3 *tp)
4350{
4351 return tp->irq_sync;
4352}
4353
4354/* Fully shutdown all tg3 driver activity elsewhere in the system.
4355 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4356  * as well.  Most of the time, this is not necessary except when
4357 * shutting down the device.
4358 */
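/* For example, tg3_reset_task() below wraps its reconfiguration in
 * tg3_full_lock(tp, 1) ... tg3_full_unlock(tp) so the IRQ handler is
 * quiesced while the hardware is reprogrammed.
 */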
4359static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4360{
Michael Chan46966542007-07-11 19:47:19 -07004361 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004362 if (irq_sync)
4363 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004364}
4365
4366static inline void tg3_full_unlock(struct tg3 *tp)
4367{
David S. Millerf47c11e2005-06-24 20:18:35 -07004368 spin_unlock_bh(&tp->lock);
4369}
4370
Michael Chanfcfa0a32006-03-20 22:28:41 -08004371/* One-shot MSI handler - Chip automatically disables interrupt
4372 * after sending MSI so driver doesn't have to do it.
4373 */
David Howells7d12e782006-10-05 14:55:46 +01004374static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004375{
4376 struct net_device *dev = dev_id;
4377 struct tg3 *tp = netdev_priv(dev);
4378
4379 prefetch(tp->hw_status);
4380 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4381
4382 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004383 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004384
4385 return IRQ_HANDLED;
4386}
4387
Michael Chan88b06bc2005-04-21 17:13:25 -07004388/* MSI ISR - No need to check for interrupt sharing and no need to
4389 * flush status block and interrupt mailbox. PCI ordering rules
4390 * guarantee that MSI will arrive after the status block.
4391 */
David Howells7d12e782006-10-05 14:55:46 +01004392static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004393{
4394 struct net_device *dev = dev_id;
4395 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004396
Michael Chan61487482005-09-05 17:53:19 -07004397 prefetch(tp->hw_status);
4398 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004399 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004400 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004401 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004402	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004403 * NIC to stop sending us irqs, engaging "in-intr-handler"
4404 * event coalescing.
4405 */
4406 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004407 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004408 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004409
Michael Chan88b06bc2005-04-21 17:13:25 -07004410 return IRQ_RETVAL(1);
4411}
4412
David Howells7d12e782006-10-05 14:55:46 +01004413static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414{
4415 struct net_device *dev = dev_id;
4416 struct tg3 *tp = netdev_priv(dev);
4417 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418 unsigned int handled = 1;
4419
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420 /* In INTx mode, it is possible for the interrupt to arrive at
4421 	 * the CPU before the status block that was posted prior to the interrupt.
4422 * Reading the PCI State register will confirm whether the
4423 * interrupt is ours and will flush the status block.
4424 */
Michael Chand18edcb2007-03-24 20:57:11 -07004425 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4426 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4427 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4428 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004429 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004430 }
Michael Chand18edcb2007-03-24 20:57:11 -07004431 }
4432
4433 /*
4434 * Writing any value to intr-mbox-0 clears PCI INTA# and
4435 * chip-internal interrupt pending events.
4436 	 * Writing non-zero to intr-mbox-0 additionally tells the
4437 * NIC to stop sending us irqs, engaging "in-intr-handler"
4438 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004439 *
4440 * Flush the mailbox to de-assert the IRQ immediately to prevent
4441 * spurious interrupts. The flush impacts performance but
4442 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004443 */
Michael Chanc04cb342007-05-07 00:26:15 -07004444 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004445 if (tg3_irq_sync(tp))
4446 goto out;
4447 sblk->status &= ~SD_STATUS_UPDATED;
4448 if (likely(tg3_has_work(tp))) {
4449 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004450 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004451 } else {
4452 /* No work, shared interrupt perhaps? re-enable
4453 * interrupts, and flush that PCI write
4454 */
4455 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4456 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004457 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004458out:
David S. Millerfac9b832005-05-18 22:46:34 -07004459 return IRQ_RETVAL(handled);
4460}
4461
David Howells7d12e782006-10-05 14:55:46 +01004462static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004463{
4464 struct net_device *dev = dev_id;
4465 struct tg3 *tp = netdev_priv(dev);
4466 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004467 unsigned int handled = 1;
4468
David S. Millerfac9b832005-05-18 22:46:34 -07004469 /* In INTx mode, it is possible for the interrupt to arrive at
4470 * the CPU before the status block posted prior to the interrupt.
4471 * Reading the PCI State register will confirm whether the
4472 * interrupt is ours and will flush the status block.
4473 */
Michael Chand18edcb2007-03-24 20:57:11 -07004474 if (unlikely(sblk->status_tag == tp->last_tag)) {
4475 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4476 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4477 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004478 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 }
Michael Chand18edcb2007-03-24 20:57:11 -07004480 }
4481
4482 /*
4483 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
4484 	 * chip-internal interrupt pending events.
4485 	 * Writing non-zero to intr-mbox-0 additionally tells the
4486 * NIC to stop sending us irqs, engaging "in-intr-handler"
4487 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004488 *
4489 * Flush the mailbox to de-assert the IRQ immediately to prevent
4490 * spurious interrupts. The flush impacts performance but
4491 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004492 */
Michael Chanc04cb342007-05-07 00:26:15 -07004493 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004494 if (tg3_irq_sync(tp))
4495 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004496 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004497 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4498 /* Update last_tag to mark that this status has been
4499 * seen. Because interrupt may be shared, we may be
4500 * racing with tg3_poll(), so only update last_tag
4501 * if tg3_poll() is not scheduled.
4502 */
4503 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004504 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004506out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507 return IRQ_RETVAL(handled);
4508}
4509
Michael Chan79381092005-04-21 17:13:59 -07004510/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004511static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004512{
4513 struct net_device *dev = dev_id;
4514 struct tg3 *tp = netdev_priv(dev);
4515 struct tg3_hw_status *sblk = tp->hw_status;
4516
Michael Chanf9804dd2005-09-27 12:13:10 -07004517 if ((sblk->status & SD_STATUS_UPDATED) ||
4518 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004519 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004520 return IRQ_RETVAL(1);
4521 }
4522 return IRQ_RETVAL(0);
4523}
4524
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004525static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004526static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527
Michael Chanb9ec6c12006-07-25 16:37:27 -07004528/* Restart hardware after configuration changes, self-test, etc.
4529 * Invoked with tp->lock held.
4530 */
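/* The error path below drops tp->lock around dev_close() and retakes
 * it afterwards, which is what the __releases/__acquires sparse
 * annotations on the definition document.
 */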
4531static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004532 __releases(tp->lock)
4533 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004534{
4535 int err;
4536
4537 err = tg3_init_hw(tp, reset_phy);
4538 if (err) {
4539 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4540 "aborting.\n", tp->dev->name);
4541 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4542 tg3_full_unlock(tp);
4543 del_timer_sync(&tp->timer);
4544 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004545 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004546 dev_close(tp->dev);
4547 tg3_full_lock(tp, 0);
4548 }
4549 return err;
4550}
4551
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552#ifdef CONFIG_NET_POLL_CONTROLLER
4553static void tg3_poll_controller(struct net_device *dev)
4554{
Michael Chan88b06bc2005-04-21 17:13:25 -07004555 struct tg3 *tp = netdev_priv(dev);
4556
David Howells7d12e782006-10-05 14:55:46 +01004557 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558}
4559#endif
4560
David Howellsc4028952006-11-22 14:57:56 +00004561static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004562{
David Howellsc4028952006-11-22 14:57:56 +00004563 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004564 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565 unsigned int restart_timer;
4566
Michael Chan7faa0062006-02-02 17:29:28 -08004567 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004568
4569 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004570 tg3_full_unlock(tp);
4571 return;
4572 }
4573
4574 tg3_full_unlock(tp);
4575
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004576 tg3_phy_stop(tp);
4577
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578 tg3_netif_stop(tp);
4579
David S. Millerf47c11e2005-06-24 20:18:35 -07004580 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
4582 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4583 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4584
Michael Chandf3e6542006-05-26 17:48:07 -07004585 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4586 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4587 tp->write32_rx_mbox = tg3_write_flush_reg32;
4588 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4589 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4590 }
4591
Michael Chan944d9802005-05-29 14:57:48 -07004592 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004593 err = tg3_init_hw(tp, 1);
4594 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004595 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596
4597 tg3_netif_start(tp);
4598
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599 if (restart_timer)
4600 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004601
Michael Chanb9ec6c12006-07-25 16:37:27 -07004602out:
Michael Chan7faa0062006-02-02 17:29:28 -08004603 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004604
4605 if (!err)
4606 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607}
4608
Michael Chanb0408752007-02-13 12:18:30 -08004609static void tg3_dump_short_state(struct tg3 *tp)
4610{
4611 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4612 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4613 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4614 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4615}
4616
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617static void tg3_tx_timeout(struct net_device *dev)
4618{
4619 struct tg3 *tp = netdev_priv(dev);
4620
Michael Chanb0408752007-02-13 12:18:30 -08004621 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004622 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4623 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004624 tg3_dump_short_state(tp);
4625 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626
4627 schedule_work(&tp->reset_task);
4628}
4629
Michael Chanc58ec932005-09-17 00:46:27 -07004630/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4631static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4632{
4633 u32 base = (u32) mapping & 0xffffffff;
4634
4635 return ((base > 0xffffdcc0) &&
4636 (base + len + 8 < base));
4637}
4638
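/* The check above flags any mapping whose buffer (plus a little slack)
 * would straddle a 4 GB boundary, by testing whether the low 32 bits of
 * the address wrap when the length is added.  A minimal illustrative
 * sketch of the same idea follows; example_crosses_4g() is a hypothetical
 * helper, not something the driver uses, and is kept out of the build.
 */
#if 0	/* illustrative sketch only, never compiled */
static inline int example_crosses_4g(u64 addr, u32 len)
{
	u32 base = (u32) addr;	/* low 32 bits of the DMA address */

	/* If base + len wraps past 2^32, the buffer spans a 4 GB boundary. */
	return (u32) (base + len) < base;
}
/* e.g. example_crosses_4g(0xfffff000ULL, 0x2000) returns 1. */
#endif
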
Michael Chan72f2afb2006-03-06 19:28:35 -08004639/* Test for DMA addresses > 40-bit */
4640static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4641 int len)
4642{
4643#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004644 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004645 return (((u64) mapping + len) > DMA_40BIT_MASK);
4646 return 0;
4647#else
4648 return 0;
4649#endif
4650}
4651
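/* DMA_40BIT_MASK is ((1ULL << 40) - 1), so the test above fires whenever
 * any byte of the buffer would land above the 1 TB limit that the affected
 * chips can address; the #if guard reflects that such addresses only occur
 * on 64-bit builds with highmem.  A hypothetical standalone equivalent,
 * kept out of the build:
 */
#if 0	/* illustrative sketch only, never compiled */
static inline int example_above_40bit(u64 addr, u32 len)
{
	return (addr + len) > ((1ULL << 40) - 1);
}
#endif
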
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4653
Michael Chan72f2afb2006-03-06 19:28:35 -08004654/* Work around 4GB and 40-bit hardware DMA bugs. */
4655static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004656 u32 last_plus_one, u32 *start,
4657 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658{
Matt Carlson41588ba2008-04-19 18:12:33 -07004659 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004660 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004662 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663
Matt Carlson41588ba2008-04-19 18:12:33 -07004664 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4665 new_skb = skb_copy(skb, GFP_ATOMIC);
4666 else {
4667 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4668
4669 new_skb = skb_copy_expand(skb,
4670 skb_headroom(skb) + more_headroom,
4671 skb_tailroom(skb), GFP_ATOMIC);
4672 }
4673
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004675 ret = -1;
4676 } else {
4677 /* New SKB is guaranteed to be linear. */
4678 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004679 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4680 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4681
Michael Chanc58ec932005-09-17 00:46:27 -07004682 /* Make sure new skb does not cross any 4G boundaries.
4683 * Drop the packet if it does.
4684 */
David S. Miller90079ce2008-09-11 04:52:51 -07004685 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004686 if (!ret)
4687 skb_dma_unmap(&tp->pdev->dev, new_skb,
4688 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004689 ret = -1;
4690 dev_kfree_skb(new_skb);
4691 new_skb = NULL;
4692 } else {
4693 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4694 base_flags, 1 | (mss << 1));
4695 *start = NEXT_TX(entry);
4696 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 }
4698
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 /* Now clean up the sw ring entries. */
4700 i = 0;
4701 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702 if (i == 0) {
4703 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004704 } else {
4705 tp->tx_buffers[entry].skb = NULL;
4706 }
4707 entry = NEXT_TX(entry);
4708 i++;
4709 }
4710
David S. Miller90079ce2008-09-11 04:52:51 -07004711 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712 dev_kfree_skb(skb);
4713
Michael Chanc58ec932005-09-17 00:46:27 -07004714 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715}
4716
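/* The workaround above copes with the 4G/40-bit DMA errata by replacing
 * the offending skb with a freshly allocated linear copy, remapping it,
 * and re-running the 4G test on the new mapping before rewriting a single
 * descriptor that now carries the whole frame; the sw ring slots that the
 * original fragments occupied are then cleared so tx completion stays
 * consistent.
 */
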
4717static void tg3_set_txd(struct tg3 *tp, int entry,
4718 dma_addr_t mapping, int len, u32 flags,
4719 u32 mss_and_is_end)
4720{
4721 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4722 int is_end = (mss_and_is_end & 0x1);
4723 u32 mss = (mss_and_is_end >> 1);
4724 u32 vlan_tag = 0;
4725
4726 if (is_end)
4727 flags |= TXD_FLAG_END;
4728 if (flags & TXD_FLAG_VLAN) {
4729 vlan_tag = flags >> 16;
4730 flags &= 0xffff;
4731 }
4732 vlan_tag |= (mss << TXD_MSS_SHIFT);
4733
4734 txd->addr_hi = ((u64) mapping >> 32);
4735 txd->addr_lo = ((u64) mapping & 0xffffffff);
4736 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4737 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4738}
4739
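/* Callers of tg3_set_txd() pack the MSS and the "last descriptor" flag
 * into one argument as (is_end | (mss << 1)), which the function unpacks
 * above.  A hypothetical helper, kept out of the build, just to make the
 * encoding explicit:
 */
#if 0	/* illustrative sketch only, never compiled */
static inline u32 example_pack_mss_is_end(u32 mss, int is_end)
{
	/* bit 0: end-of-packet flag, bits 1 and up: MSS */
	return (is_end ? 1 : 0) | (mss << 1);
}
#endif
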
Michael Chan5a6f3072006-03-20 22:28:05 -08004740/* hard_start_xmit for devices that don't have any bugs and
4741 * support TG3_FLG2_HW_TSO_2 only.
4742 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4744{
4745 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004747 struct skb_shared_info *sp;
4748 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004749
4750 len = skb_headlen(skb);
4751
Michael Chan00b70502006-06-17 21:58:45 -07004752 /* We are running in a BH-disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004753 * and TX reclaim runs via tp->napi.poll inside a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004754 * interrupt. Furthermore, IRQ processing runs lockless, so we have
4755 * no IRQ context deadlocks to worry about either. Rejoice!
4756 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004757 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004758 if (!netif_queue_stopped(dev)) {
4759 netif_stop_queue(dev);
4760
4761 /* This is a hard error, log it. */
4762 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4763 "queue awake!\n", dev->name);
4764 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004765 return NETDEV_TX_BUSY;
4766 }
4767
4768 entry = tp->tx_prod;
4769 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004770 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004771 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004772 int tcp_opt_len, ip_tcp_len;
4773
4774 if (skb_header_cloned(skb) &&
4775 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4776 dev_kfree_skb(skb);
4777 goto out_unlock;
4778 }
4779
Michael Chanb0026622006-07-03 19:42:14 -07004780 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4781 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4782 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004783 struct iphdr *iph = ip_hdr(skb);
4784
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004785 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004786 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004787
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004788 iph->check = 0;
4789 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004790 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4791 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004792
4793 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4794 TXD_FLAG_CPU_POST_DMA);
4795
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004796 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004797
Michael Chan5a6f3072006-03-20 22:28:05 -08004798 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004799 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004800 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004801#if TG3_VLAN_TAG_USED
4802 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4803 base_flags |= (TXD_FLAG_VLAN |
4804 (vlan_tx_tag_get(skb) << 16));
4805#endif
4806
David S. Miller90079ce2008-09-11 04:52:51 -07004807 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4808 dev_kfree_skb(skb);
4809 goto out_unlock;
4810 }
4811
4812 sp = skb_shinfo(skb);
4813
4814 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004815
4816 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004817
4818 tg3_set_txd(tp, entry, mapping, len, base_flags,
4819 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4820
4821 entry = NEXT_TX(entry);
4822
4823 /* Now loop through additional data fragments, and queue them. */
4824 if (skb_shinfo(skb)->nr_frags > 0) {
4825 unsigned int i, last;
4826
4827 last = skb_shinfo(skb)->nr_frags - 1;
4828 for (i = 0; i <= last; i++) {
4829 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4830
4831 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004832 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004833 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004834
4835 tg3_set_txd(tp, entry, mapping, len,
4836 base_flags, (i == last) | (mss << 1));
4837
4838 entry = NEXT_TX(entry);
4839 }
4840 }
4841
4842 /* Packets are ready, update Tx producer idx local and on card. */
4843 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4844
4845 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004846 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004847 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004848 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004849 netif_wake_queue(tp->dev);
4850 }
4851
4852out_unlock:
4853 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004854
4855 dev->trans_start = jiffies;
4856
4857 return NETDEV_TX_OK;
4858}
4859
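/* The netif_stop_queue()/netif_wake_queue() sequence at the end of
 * tg3_start_xmit() above (and of tg3_start_xmit_dma_bug() below) guards
 * against racing with the completion path: if tg3_tx() frees enough
 * descriptors between the availability check and the queue stop, the
 * immediate re-check against TG3_TX_WAKEUP_THRESH() wakes the queue again
 * so it is never left stopped while room is free.
 */
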
Michael Chan52c0fd82006-06-29 20:15:54 -07004860static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4861
4862 /* Use GSO to work around a rare TSO bug that may be triggered when the
4863 * TSO header is greater than 80 bytes.
4864 */
4865static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4866{
4867 struct sk_buff *segs, *nskb;
4868
4869 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004870 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004871 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004872 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4873 return NETDEV_TX_BUSY;
4874
4875 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004876 }
4877
4878 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004879 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004880 goto tg3_tso_bug_end;
4881
4882 do {
4883 nskb = segs;
4884 segs = segs->next;
4885 nskb->next = NULL;
4886 tg3_start_xmit_dma_bug(nskb, tp->dev);
4887 } while (segs);
4888
4889tg3_tso_bug_end:
4890 dev_kfree_skb(skb);
4891
4892 return NETDEV_TX_OK;
4893}
Michael Chan52c0fd82006-06-29 20:15:54 -07004894
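/* tg3_tso_bug() sidesteps the oversized-TSO-header erratum by having the
 * stack segment the packet in software (skb_gso_segment() with TSO masked
 * out of the advertised features) and then feeding the resulting
 * MTU-sized skbs back through tg3_start_xmit_dma_bug() one at a time.
 */
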
Michael Chan5a6f3072006-03-20 22:28:05 -08004895/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4896 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4897 */
4898static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4899{
4900 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004901 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004902 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004904 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905
4906 len = skb_headlen(skb);
4907
Michael Chan00b70502006-06-17 21:58:45 -07004908 /* We are running in a BH-disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004909 * and TX reclaim runs via tp->napi.poll inside a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004910 * interrupt. Furthermore, IRQ processing runs lockless, so we have
4911 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004912 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004913 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004914 if (!netif_queue_stopped(dev)) {
4915 netif_stop_queue(dev);
4916
4917 /* This is a hard error, log it. */
4918 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4919 "queue awake!\n", dev->name);
4920 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921 return NETDEV_TX_BUSY;
4922 }
4923
4924 entry = tp->tx_prod;
4925 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004926 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004928 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004929 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004930 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07004931 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004932
4933 if (skb_header_cloned(skb) &&
4934 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4935 dev_kfree_skb(skb);
4936 goto out_unlock;
4937 }
4938
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004939 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004940 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941
Michael Chan52c0fd82006-06-29 20:15:54 -07004942 hdr_len = ip_tcp_len + tcp_opt_len;
4943 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08004944 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07004945 return (tg3_tso_bug(tp, skb));
4946
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4948 TXD_FLAG_CPU_POST_DMA);
4949
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004950 iph = ip_hdr(skb);
4951 iph->check = 0;
4952 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004954 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004955 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004956 } else
4957 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4958 iph->daddr, 0,
4959 IPPROTO_TCP,
4960 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961
4962 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4963 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004964 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004965 int tsflags;
4966
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004967 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968 mss |= (tsflags << 11);
4969 }
4970 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004971 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 int tsflags;
4973
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004974 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 base_flags |= tsflags << 12;
4976 }
4977 }
4978 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979#if TG3_VLAN_TAG_USED
4980 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4981 base_flags |= (TXD_FLAG_VLAN |
4982 (vlan_tx_tag_get(skb) << 16));
4983#endif
4984
David S. Miller90079ce2008-09-11 04:52:51 -07004985 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4986 dev_kfree_skb(skb);
4987 goto out_unlock;
4988 }
4989
4990 sp = skb_shinfo(skb);
4991
4992 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993
4994 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995
4996 would_hit_hwbug = 0;
4997
Matt Carlson41588ba2008-04-19 18:12:33 -07004998 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4999 would_hit_hwbug = 1;
5000 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005001 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002
5003 tg3_set_txd(tp, entry, mapping, len, base_flags,
5004 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5005
5006 entry = NEXT_TX(entry);
5007
5008 /* Now loop through additional data fragments, and queue them. */
5009 if (skb_shinfo(skb)->nr_frags > 0) {
5010 unsigned int i, last;
5011
5012 last = skb_shinfo(skb)->nr_frags - 1;
5013 for (i = 0; i <= last; i++) {
5014 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5015
5016 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005017 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018
5019 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020
Michael Chanc58ec932005-09-17 00:46:27 -07005021 if (tg3_4g_overflow_test(mapping, len))
5022 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023
Michael Chan72f2afb2006-03-06 19:28:35 -08005024 if (tg3_40bit_overflow_test(tp, mapping, len))
5025 would_hit_hwbug = 1;
5026
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5028 tg3_set_txd(tp, entry, mapping, len,
5029 base_flags, (i == last)|(mss << 1));
5030 else
5031 tg3_set_txd(tp, entry, mapping, len,
5032 base_flags, (i == last));
5033
5034 entry = NEXT_TX(entry);
5035 }
5036 }
5037
5038 if (would_hit_hwbug) {
5039 u32 last_plus_one = entry;
5040 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041
Michael Chanc58ec932005-09-17 00:46:27 -07005042 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5043 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044
5045 /* If the workaround fails due to memory/mapping
5046 * failure, silently drop this packet.
5047 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005048 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005049 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050 goto out_unlock;
5051
5052 entry = start;
5053 }
5054
5055 /* Packets are ready, update Tx producer idx local and on card. */
5056 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5057
5058 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005059 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005061 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005062 netif_wake_queue(tp->dev);
5063 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064
5065out_unlock:
5066 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067
5068 dev->trans_start = jiffies;
5069
5070 return NETDEV_TX_OK;
5071}
5072
5073static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5074 int new_mtu)
5075{
5076 dev->mtu = new_mtu;
5077
Michael Chanef7f5ec2005-07-25 12:32:25 -07005078 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005079 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005080 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5081 ethtool_op_set_tso(dev, 0);
5082 }
5083 else
5084 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5085 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005086 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005087 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005088 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090}
5091
5092static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5093{
5094 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005095 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005096
5097 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5098 return -EINVAL;
5099
5100 if (!netif_running(dev)) {
5101 /* We'll just catch it later when the
5102 * device is up'd.
5103 */
5104 tg3_set_mtu(dev, tp, new_mtu);
5105 return 0;
5106 }
5107
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005108 tg3_phy_stop(tp);
5109
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005111
5112 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113
Michael Chan944d9802005-05-29 14:57:48 -07005114 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115
5116 tg3_set_mtu(dev, tp, new_mtu);
5117
Michael Chanb9ec6c12006-07-25 16:37:27 -07005118 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119
Michael Chanb9ec6c12006-07-25 16:37:27 -07005120 if (!err)
5121 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122
David S. Millerf47c11e2005-06-24 20:18:35 -07005123 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005125 if (!err)
5126 tg3_phy_start(tp);
5127
Michael Chanb9ec6c12006-07-25 16:37:27 -07005128 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129}
5130
5131/* Free up pending packets in all rx/tx rings.
5132 *
5133 * The chip has been shut down and the driver detached from
5134 * the networking stack, so no interrupts or new tx packets will
5135 * end up in the driver. tp->{tx,}lock is not held and we are not
5136 * in an interrupt context and thus may sleep.
5137 */
5138static void tg3_free_rings(struct tg3 *tp)
5139{
5140 struct ring_info *rxp;
5141 int i;
5142
5143 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5144 rxp = &tp->rx_std_buffers[i];
5145
5146 if (rxp->skb == NULL)
5147 continue;
5148 pci_unmap_single(tp->pdev,
5149 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005150 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 PCI_DMA_FROMDEVICE);
5152 dev_kfree_skb_any(rxp->skb);
5153 rxp->skb = NULL;
5154 }
5155
5156 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5157 rxp = &tp->rx_jumbo_buffers[i];
5158
5159 if (rxp->skb == NULL)
5160 continue;
5161 pci_unmap_single(tp->pdev,
5162 pci_unmap_addr(rxp, mapping),
5163 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5164 PCI_DMA_FROMDEVICE);
5165 dev_kfree_skb_any(rxp->skb);
5166 rxp->skb = NULL;
5167 }
5168
5169 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5170 struct tx_ring_info *txp;
5171 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
5173 txp = &tp->tx_buffers[i];
5174 skb = txp->skb;
5175
5176 if (skb == NULL) {
5177 i++;
5178 continue;
5179 }
5180
David S. Miller90079ce2008-09-11 04:52:51 -07005181 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5182
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 txp->skb = NULL;
5184
David S. Miller90079ce2008-09-11 04:52:51 -07005185 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186
5187 dev_kfree_skb_any(skb);
5188 }
5189}
5190
5191/* Initialize tx/rx rings for packet processing.
5192 *
5193 * The chip has been shut down and the driver detached from
5194 * the networking stack, so no interrupts or new tx packets will
5195 * end up in the driver. tp->{tx,}lock are held and thus
5196 * we may not sleep.
5197 */
Michael Chan32d8c572006-07-25 16:38:29 -07005198static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199{
5200 u32 i;
5201
5202 /* Free up all the SKBs. */
5203 tg3_free_rings(tp);
5204
5205 /* Zero out all descriptors. */
5206 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5207 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5208 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5209 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5210
Michael Chan7e72aad2005-07-25 12:31:17 -07005211 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005212 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005213 (tp->dev->mtu > ETH_DATA_LEN))
5214 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5215
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216 /* Initialize invariants of the rings, we only set this
5217 * stuff once. This works because the card does not
5218 * write into the rx buffer posting rings.
5219 */
5220 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5221 struct tg3_rx_buffer_desc *rxd;
5222
5223 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005224 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225 << RXD_LEN_SHIFT;
5226 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5227 rxd->opaque = (RXD_OPAQUE_RING_STD |
5228 (i << RXD_OPAQUE_INDEX_SHIFT));
5229 }
5230
Michael Chan0f893dc2005-07-25 12:30:38 -07005231 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5233 struct tg3_rx_buffer_desc *rxd;
5234
5235 rxd = &tp->rx_jumbo[i];
5236 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5237 << RXD_LEN_SHIFT;
5238 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5239 RXD_FLAG_JUMBO;
5240 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5241 (i << RXD_OPAQUE_INDEX_SHIFT));
5242 }
5243 }
5244
5245 /* Now allocate fresh SKBs for each rx ring. */
5246 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005247 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5248 printk(KERN_WARNING PFX
5249 "%s: Using a smaller RX standard ring, "
5250 "only %d out of %d buffers were allocated "
5251 "successfully.\n",
5252 tp->dev->name, i, tp->rx_pending);
5253 if (i == 0)
5254 return -ENOMEM;
5255 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 }
5259
Michael Chan0f893dc2005-07-25 12:30:38 -07005260 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5262 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005263 -1, i) < 0) {
5264 printk(KERN_WARNING PFX
5265 "%s: Using a smaller RX jumbo ring, "
5266 "only %d out of %d buffers were "
5267 "allocated successfully.\n",
5268 tp->dev->name, i, tp->rx_jumbo_pending);
5269 if (i == 0) {
5270 tg3_free_rings(tp);
5271 return -ENOMEM;
5272 }
5273 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005275 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276 }
5277 }
Michael Chan32d8c572006-07-25 16:38:29 -07005278 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005279}
5280
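/* The opaque field initialized above is how rx completions are matched
 * back to ring slots: it encodes the ring identity (RXD_OPAQUE_RING_STD or
 * RXD_OPAQUE_RING_JUMBO) plus the slot index, and the chip echoes it
 * untouched in the return ring, so the rx completion path can recover the
 * buffer with a mask and a shift.  That is why these descriptor fields can
 * be written once here and never touched again.
 */
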
5281/*
5282 * Must not be invoked with interrupt sources disabled and
5283 * the hardware shut down.
5284 */
5285static void tg3_free_consistent(struct tg3 *tp)
5286{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005287 kfree(tp->rx_std_buffers);
5288 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289 if (tp->rx_std) {
5290 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5291 tp->rx_std, tp->rx_std_mapping);
5292 tp->rx_std = NULL;
5293 }
5294 if (tp->rx_jumbo) {
5295 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5296 tp->rx_jumbo, tp->rx_jumbo_mapping);
5297 tp->rx_jumbo = NULL;
5298 }
5299 if (tp->rx_rcb) {
5300 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5301 tp->rx_rcb, tp->rx_rcb_mapping);
5302 tp->rx_rcb = NULL;
5303 }
5304 if (tp->tx_ring) {
5305 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5306 tp->tx_ring, tp->tx_desc_mapping);
5307 tp->tx_ring = NULL;
5308 }
5309 if (tp->hw_status) {
5310 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5311 tp->hw_status, tp->status_mapping);
5312 tp->hw_status = NULL;
5313 }
5314 if (tp->hw_stats) {
5315 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5316 tp->hw_stats, tp->stats_mapping);
5317 tp->hw_stats = NULL;
5318 }
5319}
5320
5321/*
5322 * Must not be invoked with interrupt sources disabled and
5323 * the hardware shut down. Can sleep.
5324 */
5325static int tg3_alloc_consistent(struct tg3 *tp)
5326{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005327 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 (TG3_RX_RING_SIZE +
5329 TG3_RX_JUMBO_RING_SIZE)) +
5330 (sizeof(struct tx_ring_info) *
5331 TG3_TX_RING_SIZE),
5332 GFP_KERNEL);
5333 if (!tp->rx_std_buffers)
5334 return -ENOMEM;
5335
Linus Torvalds1da177e2005-04-16 15:20:36 -07005336 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5337 tp->tx_buffers = (struct tx_ring_info *)
5338 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5339
5340 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5341 &tp->rx_std_mapping);
5342 if (!tp->rx_std)
5343 goto err_out;
5344
5345 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5346 &tp->rx_jumbo_mapping);
5347
5348 if (!tp->rx_jumbo)
5349 goto err_out;
5350
5351 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5352 &tp->rx_rcb_mapping);
5353 if (!tp->rx_rcb)
5354 goto err_out;
5355
5356 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5357 &tp->tx_desc_mapping);
5358 if (!tp->tx_ring)
5359 goto err_out;
5360
5361 tp->hw_status = pci_alloc_consistent(tp->pdev,
5362 TG3_HW_STATUS_SIZE,
5363 &tp->status_mapping);
5364 if (!tp->hw_status)
5365 goto err_out;
5366
5367 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5368 sizeof(struct tg3_hw_stats),
5369 &tp->stats_mapping);
5370 if (!tp->hw_stats)
5371 goto err_out;
5372
5373 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5374 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5375
5376 return 0;
5377
5378err_out:
5379 tg3_free_consistent(tp);
5380 return -ENOMEM;
5381}
5382
5383#define MAX_WAIT_CNT 1000
5384
5385/* To stop a block, clear the enable bit and poll till it
5386 * clears. tp->lock is held.
5387 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005388static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005389{
5390 unsigned int i;
5391 u32 val;
5392
5393 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5394 switch (ofs) {
5395 case RCVLSC_MODE:
5396 case DMAC_MODE:
5397 case MBFREE_MODE:
5398 case BUFMGR_MODE:
5399 case MEMARB_MODE:
5400 /* We can't enable/disable these bits of the
5401 * 5705/5750, just say success.
5402 */
5403 return 0;
5404
5405 default:
5406 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005407 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408 }
5409
5410 val = tr32(ofs);
5411 val &= ~enable_bit;
5412 tw32_f(ofs, val);
5413
5414 for (i = 0; i < MAX_WAIT_CNT; i++) {
5415 udelay(100);
5416 val = tr32(ofs);
5417 if ((val & enable_bit) == 0)
5418 break;
5419 }
5420
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005421 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5423 "ofs=%lx enable_bit=%x\n",
5424 ofs, enable_bit);
5425 return -ENODEV;
5426 }
5427
5428 return 0;
5429}
5430
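/* The function above is the generic "clear the enable bit, then poll until
 * the block reports idle" pattern used throughout tg3_abort_hw().  A bare
 * sketch of that pattern, with rd32()/wr32() as hypothetical stand-ins for
 * the driver's tr32()/tw32() accessors, kept out of the build:
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_stop_block(unsigned long ofs, u32 enable_bit)
{
	int i;

	wr32(ofs, rd32(ofs) & ~enable_bit);	/* request the stop */
	for (i = 0; i < 1000; i++) {
		udelay(100);
		if (!(rd32(ofs) & enable_bit))	/* hardware went idle */
			return 0;
	}
	return -ENODEV;				/* never went idle */
}
#endif
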
5431/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005432static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433{
5434 int i, err;
5435
5436 tg3_disable_ints(tp);
5437
5438 tp->rx_mode &= ~RX_MODE_ENABLE;
5439 tw32_f(MAC_RX_MODE, tp->rx_mode);
5440 udelay(10);
5441
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005442 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5443 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5444 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5445 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5446 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5447 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005449 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5450 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5451 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5452 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5453 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5454 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5455 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456
5457 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5458 tw32_f(MAC_MODE, tp->mac_mode);
5459 udelay(40);
5460
5461 tp->tx_mode &= ~TX_MODE_ENABLE;
5462 tw32_f(MAC_TX_MODE, tp->tx_mode);
5463
5464 for (i = 0; i < MAX_WAIT_CNT; i++) {
5465 udelay(100);
5466 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5467 break;
5468 }
5469 if (i >= MAX_WAIT_CNT) {
5470 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5471 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5472 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005473 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005474 }
5475
Michael Chane6de8ad2005-05-05 14:42:41 -07005476 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005477 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5478 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479
5480 tw32(FTQ_RESET, 0xffffffff);
5481 tw32(FTQ_RESET, 0x00000000);
5482
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005483 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5484 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005485
5486 if (tp->hw_status)
5487 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5488 if (tp->hw_stats)
5489 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5490
Linus Torvalds1da177e2005-04-16 15:20:36 -07005491 return err;
5492}
5493
5494/* tp->lock is held. */
5495static int tg3_nvram_lock(struct tg3 *tp)
5496{
5497 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5498 int i;
5499
Michael Chanec41c7d2006-01-17 02:40:55 -08005500 if (tp->nvram_lock_cnt == 0) {
5501 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5502 for (i = 0; i < 8000; i++) {
5503 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5504 break;
5505 udelay(20);
5506 }
5507 if (i == 8000) {
5508 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5509 return -ENODEV;
5510 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005512 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005513 }
5514 return 0;
5515}
5516
5517/* tp->lock is held. */
5518static void tg3_nvram_unlock(struct tg3 *tp)
5519{
Michael Chanec41c7d2006-01-17 02:40:55 -08005520 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5521 if (tp->nvram_lock_cnt > 0)
5522 tp->nvram_lock_cnt--;
5523 if (tp->nvram_lock_cnt == 0)
5524 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5525 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005526}
5527
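/* The NVRAM lock is a hardware arbitration grant (the SWARB request/grant
 * bits polled above) wrapped in a simple software recursion count, so
 * nested tg3_nvram_lock() calls made under tp->lock only touch the
 * hardware on the first acquire and the matching final release.
 */
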
5528/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005529static void tg3_enable_nvram_access(struct tg3 *tp)
5530{
5531 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5532 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5533 u32 nvaccess = tr32(NVRAM_ACCESS);
5534
5535 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5536 }
5537}
5538
5539/* tp->lock is held. */
5540static void tg3_disable_nvram_access(struct tg3 *tp)
5541{
5542 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5543 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5544 u32 nvaccess = tr32(NVRAM_ACCESS);
5545
5546 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5547 }
5548}
5549
Matt Carlson0d3031d2007-10-10 18:02:43 -07005550static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5551{
5552 int i;
5553 u32 apedata;
5554
5555 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5556 if (apedata != APE_SEG_SIG_MAGIC)
5557 return;
5558
5559 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005560 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005561 return;
5562
5563 /* Wait for up to 1 millisecond for APE to service previous event. */
5564 for (i = 0; i < 10; i++) {
5565 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5566 return;
5567
5568 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5569
5570 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5571 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5572 event | APE_EVENT_STATUS_EVENT_PENDING);
5573
5574 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5575
5576 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5577 break;
5578
5579 udelay(100);
5580 }
5581
5582 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5583 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5584}
5585
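/* tg3_ape_send_event() first verifies that the APE firmware is alive (the
 * segment signature and APE_FW_STATUS_READY checks), then, under the
 * shared-memory lock, waits up to roughly a millisecond for any previous
 * event to drain before posting the new one with EVENT_PENDING set and
 * finally kicking TG3_APE_EVENT with APE_EVENT_1.
 */
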
5586static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5587{
5588 u32 event;
5589 u32 apedata;
5590
5591 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5592 return;
5593
5594 switch (kind) {
5595 case RESET_KIND_INIT:
5596 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5597 APE_HOST_SEG_SIG_MAGIC);
5598 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5599 APE_HOST_SEG_LEN_MAGIC);
5600 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5601 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5602 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5603 APE_HOST_DRIVER_ID_MAGIC);
5604 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5605 APE_HOST_BEHAV_NO_PHYLOCK);
5606
5607 event = APE_EVENT_STATUS_STATE_START;
5608 break;
5609 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005610 /* With the interface we are currently using,
5611 * APE does not track driver state. Wiping
5612 * out the HOST SEGMENT SIGNATURE forces
5613 * the APE to assume OS absent status.
5614 */
5615 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5616
Matt Carlson0d3031d2007-10-10 18:02:43 -07005617 event = APE_EVENT_STATUS_STATE_UNLOAD;
5618 break;
5619 case RESET_KIND_SUSPEND:
5620 event = APE_EVENT_STATUS_STATE_SUSPEND;
5621 break;
5622 default:
5623 return;
5624 }
5625
5626 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5627
5628 tg3_ape_send_event(tp, event);
5629}
5630
Michael Chane6af3012005-04-21 17:12:05 -07005631/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005632static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5633{
David S. Millerf49639e2006-06-09 11:58:36 -07005634 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5635 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005636
5637 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5638 switch (kind) {
5639 case RESET_KIND_INIT:
5640 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5641 DRV_STATE_START);
5642 break;
5643
5644 case RESET_KIND_SHUTDOWN:
5645 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5646 DRV_STATE_UNLOAD);
5647 break;
5648
5649 case RESET_KIND_SUSPEND:
5650 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5651 DRV_STATE_SUSPEND);
5652 break;
5653
5654 default:
5655 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005656 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005657 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005658
5659 if (kind == RESET_KIND_INIT ||
5660 kind == RESET_KIND_SUSPEND)
5661 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005662}
5663
5664/* tp->lock is held. */
5665static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5666{
5667 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5668 switch (kind) {
5669 case RESET_KIND_INIT:
5670 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5671 DRV_STATE_START_DONE);
5672 break;
5673
5674 case RESET_KIND_SHUTDOWN:
5675 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5676 DRV_STATE_UNLOAD_DONE);
5677 break;
5678
5679 default:
5680 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005681 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005682 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005683
5684 if (kind == RESET_KIND_SHUTDOWN)
5685 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686}
5687
5688/* tp->lock is held. */
5689static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5690{
5691 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5692 switch (kind) {
5693 case RESET_KIND_INIT:
5694 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5695 DRV_STATE_START);
5696 break;
5697
5698 case RESET_KIND_SHUTDOWN:
5699 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5700 DRV_STATE_UNLOAD);
5701 break;
5702
5703 case RESET_KIND_SUSPEND:
5704 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5705 DRV_STATE_SUSPEND);
5706 break;
5707
5708 default:
5709 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005710 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 }
5712}
5713
Michael Chan7a6f4362006-09-27 16:03:31 -07005714static int tg3_poll_fw(struct tg3 *tp)
5715{
5716 int i;
5717 u32 val;
5718
Michael Chanb5d37722006-09-27 16:06:21 -07005719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005720 /* Wait up to 20ms for init done. */
5721 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005722 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5723 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005724 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005725 }
5726 return -ENODEV;
5727 }
5728
Michael Chan7a6f4362006-09-27 16:03:31 -07005729 /* Wait for firmware initialization to complete. */
5730 for (i = 0; i < 100000; i++) {
5731 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5732 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5733 break;
5734 udelay(10);
5735 }
5736
5737 /* Chip might not be fitted with firmware. Some Sun onboard
5738 * parts are configured like that. So don't signal the timeout
5739 * of the above loop as an error, but do report the lack of
5740 * running firmware once.
5741 */
5742 if (i >= 100000 &&
5743 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5744 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5745
5746 printk(KERN_INFO PFX "%s: No firmware running.\n",
5747 tp->dev->name);
5748 }
5749
5750 return 0;
5751}
5752
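/* The boot handshake polled above works in two directions: the driver
 * writes NIC_SRAM_FIRMWARE_MBOX_MAGIC1 into the firmware mailbox before
 * reset (see tg3_write_sig_pre_reset()), and the bootcode writes back the
 * one's complement of that magic once it has finished, so tg3_poll_fw()
 * waits for ~MAGIC1.  5906 parts expose an explicit VCPU_STATUS_INIT_DONE
 * bit instead, which is handled first.
 */
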
Michael Chanee6a99b2007-07-18 21:49:10 -07005753/* Save PCI command register before chip reset */
5754static void tg3_save_pci_state(struct tg3 *tp)
5755{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005756 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005757}
5758
5759/* Restore PCI state after chip reset */
5760static void tg3_restore_pci_state(struct tg3 *tp)
5761{
5762 u32 val;
5763
5764 /* Re-enable indirect register accesses. */
5765 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5766 tp->misc_host_ctrl);
5767
5768 /* Set MAX PCI retry to zero. */
5769 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5770 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5771 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5772 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005773 /* Allow reads and writes to the APE register and memory space. */
5774 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5775 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5776 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005777 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5778
Matt Carlson8a6eac92007-10-21 16:17:55 -07005779 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005780
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005781 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5782 pcie_set_readrq(tp->pdev, 4096);
5783 else {
Michael Chan114342f2007-10-15 02:12:26 -07005784 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5785 tp->pci_cacheline_sz);
5786 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5787 tp->pci_lat_timer);
5788 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005789
Michael Chanee6a99b2007-07-18 21:49:10 -07005790 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005791 if (tp->pcix_cap) {
5792 u16 pcix_cmd;
5793
5794 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5795 &pcix_cmd);
5796 pcix_cmd &= ~PCI_X_CMD_ERO;
5797 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5798 pcix_cmd);
5799 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005800
5801 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005802
5803 /* Chip reset on 5780 will reset MSI enable bit,
5804 * so need to restore it.
5805 */
5806 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5807 u16 ctrl;
5808
5809 pci_read_config_word(tp->pdev,
5810 tp->msi_cap + PCI_MSI_FLAGS,
5811 &ctrl);
5812 pci_write_config_word(tp->pdev,
5813 tp->msi_cap + PCI_MSI_FLAGS,
5814 ctrl | PCI_MSI_FLAGS_ENABLE);
5815 val = tr32(MSGINT_MODE);
5816 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5817 }
5818 }
5819}
5820
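/* The GRC_MISC_CFG core-clock reset can wipe the PCI command register's
 * memory-enable bit and the MSI enable bit, so tg3_save_pci_state() and
 * tg3_restore_pci_state() bracket tg3_chip_reset(): the command word,
 * cacheline size, latency timer (or PCIe read request size), PCI-X relaxed
 * ordering and MSI enable are all put back the way the driver needs them
 * before normal register access resumes.
 */
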
Linus Torvalds1da177e2005-04-16 15:20:36 -07005821static void tg3_stop_fw(struct tg3 *);
5822
5823/* tp->lock is held. */
5824static int tg3_chip_reset(struct tg3 *tp)
5825{
5826 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005827 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005828 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829
David S. Millerf49639e2006-06-09 11:58:36 -07005830 tg3_nvram_lock(tp);
5831
Matt Carlson158d7ab2008-05-29 01:37:54 -07005832 tg3_mdio_stop(tp);
5833
Matt Carlson77b483f2008-08-15 14:07:24 -07005834 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5835
David S. Millerf49639e2006-06-09 11:58:36 -07005836 /* No matching tg3_nvram_unlock() after this because
5837 * chip reset below will undo the nvram lock.
5838 */
5839 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005840
Michael Chanee6a99b2007-07-18 21:49:10 -07005841 /* GRC_MISC_CFG core clock reset will clear the memory
5842 * enable bit in PCI register 4 and the MSI enable bit
5843 * on some chips, so we save relevant registers here.
5844 */
5845 tg3_save_pci_state(tp);
5846
Michael Chand9ab5ad2006-03-20 22:27:35 -08005847 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005853 tw32(GRC_FASTBOOT_PC, 0);
5854
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 /*
5856 * We must avoid the readl() that normally takes place.
5857 * It locks up machines, causes machine checks, and other
5858 * fun things. So, temporarily disable the 5701
5859 * hardware workaround, while we do the reset.
5860 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005861 write_op = tp->write32;
5862 if (write_op == tg3_write_flush_reg32)
5863 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005864
Michael Chand18edcb2007-03-24 20:57:11 -07005865 /* Prevent the irq handler from reading or writing PCI registers
5866 * during chip reset when the memory enable bit in the PCI command
5867 * register may be cleared. The chip does not generate interrupt
5868 * at this time, but the irq handler may still be called due to irq
5869 * sharing or irqpoll.
5870 */
5871 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005872 if (tp->hw_status) {
5873 tp->hw_status->status = 0;
5874 tp->hw_status->status_tag = 0;
5875 }
Michael Chand18edcb2007-03-24 20:57:11 -07005876 tp->last_tag = 0;
5877 smp_mb();
5878 synchronize_irq(tp->pdev->irq);
5879
Linus Torvalds1da177e2005-04-16 15:20:36 -07005880 /* do the reset */
5881 val = GRC_MISC_CFG_CORECLK_RESET;
5882
5883 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5884 if (tr32(0x7e2c) == 0x60) {
5885 tw32(0x7e2c, 0x20);
5886 }
5887 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5888 tw32(GRC_MISC_CFG, (1 << 29));
5889 val |= (1 << 29);
5890 }
5891 }
5892
Michael Chanb5d37722006-09-27 16:06:21 -07005893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5894 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5895 tw32(GRC_VCPU_EXT_CTRL,
5896 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5897 }
5898
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5900 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5901 tw32(GRC_MISC_CFG, val);
5902
Michael Chan1ee582d2005-08-09 20:16:46 -07005903 /* restore 5701 hardware bug workaround write method */
5904 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905
5906 /* Unfortunately, we have to delay before the PCI read back.
5907 * Some 575X chips will not even respond to a PCI cfg access
5908 * when the reset command is given to the chip.
5909 *
5910 * How do these hardware designers expect things to work
5911 * properly if the PCI write is posted for a long period
5912 * of time? It is always necessary to have some method by
5913 * which a register read back can occur to push out the write
5914 * that does the reset.
5915 *
5916 * For most tg3 variants the trick below was working.
5917 * Ho hum...
5918 */
5919 udelay(120);
5920
5921 /* Flush PCI posted writes. The normal MMIO registers
5922 * are inaccessible at this time, so this is the only
5923 * way to do this reliably (actually, this is no longer
5924 * the case, see above). I tried to use indirect
5925 * register read/write but this upset some 5701 variants.
5926 */
5927 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5928
5929 udelay(120);
5930
5931 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5932 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5933 int i;
5934 u32 cfg_val;
5935
5936 /* Wait for link training to complete. */
5937 for (i = 0; i < 5000; i++)
5938 udelay(100);
5939
5940 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5941 pci_write_config_dword(tp->pdev, 0xc4,
5942 cfg_val | (1 << 15));
5943 }
5944 /* Set PCIE max payload size and clear error status. */
5945 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5946 }
5947
Michael Chanee6a99b2007-07-18 21:49:10 -07005948 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949
Michael Chand18edcb2007-03-24 20:57:11 -07005950 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5951
Michael Chanee6a99b2007-07-18 21:49:10 -07005952 val = 0;
5953 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07005954 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07005955 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005956
5957 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5958 tg3_stop_fw(tp);
5959 tw32(0x5000, 0x400);
5960 }
5961
5962 tw32(GRC_MODE, tp->grc_mode);
5963
5964 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005965 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005966
5967 tw32(0xc4, val | (1 << 15));
5968 }
5969
5970 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5972 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5973 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5974 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5975 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5976 }
5977
5978 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5979 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5980 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07005981 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5982 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5983 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07005984 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5985 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5986 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5987 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5988 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005989 } else
5990 tw32_f(MAC_MODE, 0);
5991 udelay(40);
5992
Matt Carlson158d7ab2008-05-29 01:37:54 -07005993 tg3_mdio_start(tp);
5994
Matt Carlson77b483f2008-08-15 14:07:24 -07005995 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5996
Michael Chan7a6f4362006-09-27 16:03:31 -07005997 err = tg3_poll_fw(tp);
5998 if (err)
5999 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006000
6001 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6002 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006003 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006004
6005 tw32(0x7c00, val | (1 << 25));
6006 }
6007
6008 /* Reprobe ASF enable state. */
6009 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6010 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6011 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6012 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6013 u32 nic_cfg;
6014
6015 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6016 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6017 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006018 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006019 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006020 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6021 }
6022 }
6023
6024 return 0;
6025}
6026
6027/* tp->lock is held. */
6028static void tg3_stop_fw(struct tg3 *tp)
6029{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006030 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6031 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006032 /* Wait for RX cpu to ACK the previous event. */
6033 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006034
6035 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006036
6037 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006038
Matt Carlson7c5026a2008-05-02 16:49:29 -07006039 /* Wait for RX cpu to ACK this event. */
6040 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006041 }
6042}
6043
6044/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006045static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046{
6047 int err;
6048
6049 tg3_stop_fw(tp);
6050
Michael Chan944d9802005-05-29 14:57:48 -07006051 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006052
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006053 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006054 err = tg3_chip_reset(tp);
6055
Michael Chan944d9802005-05-29 14:57:48 -07006056 tg3_write_sig_legacy(tp, kind);
6057 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006058
6059 if (err)
6060 return err;
6061
6062 return 0;
6063}
6064
6065#define TG3_FW_RELEASE_MAJOR 0x0
6066#define TG3_FW_RELASE_MINOR 0x0
6067#define TG3_FW_RELEASE_FIX 0x0
6068#define TG3_FW_START_ADDR 0x08000000
6069#define TG3_FW_TEXT_ADDR 0x08000000
6070#define TG3_FW_TEXT_LEN 0x9c0
6071#define TG3_FW_RODATA_ADDR 0x080009c0
6072#define TG3_FW_RODATA_LEN 0x60
6073#define TG3_FW_DATA_ADDR 0x08000a40
6074#define TG3_FW_DATA_LEN 0x20
6075#define TG3_FW_SBSS_ADDR 0x08000a60
6076#define TG3_FW_SBSS_LEN 0xc
6077#define TG3_FW_BSS_ADDR 0x08000a70
6078#define TG3_FW_BSS_LEN 0x10
6079
Andreas Mohr50da8592006-08-14 23:54:30 -07006080static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006081 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6082 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6083 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6084 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6085 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6086 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6087 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6088 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6089 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6090 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6091 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6092 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6093 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6094 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6095 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6096 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6097 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6098 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6099 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6100 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6101 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6102 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6103 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6106 0, 0, 0, 0, 0, 0,
6107 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6108 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6109 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6110 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6111 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6112 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6113 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6114 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6115 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6116 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6117 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6120 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6121 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6122 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6123 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6124 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6125 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6126 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6127 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6128 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6129 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6130 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6131 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6132 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6133 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6134 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6135 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6136 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6137 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6138 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6139 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6140 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6141 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6142 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6143 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6144 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6145 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6146 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6147 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6148 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6149 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6150 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6151 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6152 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6153 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6154 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6155 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6156 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6157 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6158 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6159 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6160 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6161 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6162 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6163 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6164 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6165 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6166 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6167 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6168 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6169 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6170 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6171 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6172};
6173
Andreas Mohr50da8592006-08-14 23:54:30 -07006174static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6176 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6177 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6178 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6179 0x00000000
6180};
6181
6182#if 0 /* All zeros, don't eat up space with it. */
6183u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6184 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6185 0x00000000, 0x00000000, 0x00000000, 0x00000000
6186};
6187#endif
6188
6189#define RX_CPU_SCRATCH_BASE 0x30000
6190#define RX_CPU_SCRATCH_SIZE 0x04000
6191#define TX_CPU_SCRATCH_BASE 0x34000
6192#define TX_CPU_SCRATCH_SIZE 0x04000
6193
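/* Halt the RX or TX cpu at 'offset' by asserting CPU_MODE_HALT,
 * retrying up to 10000 times.  5906 parts are handled through
 * GRC_VCPU_EXT_CTRL instead.  Any NVRAM arbitration request left
 * behind by the firmware is cleared before returning.
 */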
6194/* tp->lock is held. */
6195static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6196{
6197 int i;
6198
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006199 BUG_ON(offset == TX_CPU_BASE &&
6200 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006201
Michael Chanb5d37722006-09-27 16:06:21 -07006202 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6203 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6204
6205 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6206 return 0;
6207 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006208 if (offset == RX_CPU_BASE) {
6209 for (i = 0; i < 10000; i++) {
6210 tw32(offset + CPU_STATE, 0xffffffff);
6211 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6212 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6213 break;
6214 }
6215
6216 tw32(offset + CPU_STATE, 0xffffffff);
6217 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6218 udelay(10);
6219 } else {
6220 for (i = 0; i < 10000; i++) {
6221 tw32(offset + CPU_STATE, 0xffffffff);
6222 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6223 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6224 break;
6225 }
6226 }
6227
6228 if (i >= 10000) {
 6229		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
 6230		       "%s CPU\n",
6231 tp->dev->name,
6232 (offset == RX_CPU_BASE ? "RX" : "TX"));
6233 return -ENODEV;
6234 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006235
6236 /* Clear firmware's nvram arbitration. */
6237 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6238 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239 return 0;
6240}
6241
6242struct fw_info {
6243 unsigned int text_base;
6244 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006245 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006246 unsigned int rodata_base;
6247 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006248 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006249 unsigned int data_base;
6250 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006251 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006252};
6253
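/* Copy one firmware image into a cpu's scratch memory: halt the cpu
 * (taking the NVRAM lock first in case bootcode is still running),
 * zero the scratch area, then write the text, rodata and data sections
 * word by word.  Loading TX cpu firmware is rejected on 5705+ parts,
 * which have no TX cpu; their TSO image below runs on the RX cpu.
 */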
6254/* tp->lock is held. */
6255static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6256 int cpu_scratch_size, struct fw_info *info)
6257{
Michael Chanec41c7d2006-01-17 02:40:55 -08006258 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006259 void (*write_op)(struct tg3 *, u32, u32);
6260
6261 if (cpu_base == TX_CPU_BASE &&
6262 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6263 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
 6264		       "TX cpu firmware on %s which is 5705 or later.\n",
6265 tp->dev->name);
6266 return -EINVAL;
6267 }
6268
6269 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6270 write_op = tg3_write_mem;
6271 else
6272 write_op = tg3_write_indirect_reg32;
6273
Michael Chan1b628152005-05-29 14:59:49 -07006274 /* It is possible that bootcode is still loading at this point.
6275 * Get the nvram lock first before halting the cpu.
6276 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006277 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006278 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006279 if (!lock_err)
6280 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281 if (err)
6282 goto out;
6283
6284 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6285 write_op(tp, cpu_scratch_base + i, 0);
6286 tw32(cpu_base + CPU_STATE, 0xffffffff);
6287 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6288 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6289 write_op(tp, (cpu_scratch_base +
6290 (info->text_base & 0xffff) +
6291 (i * sizeof(u32))),
6292 (info->text_data ?
6293 info->text_data[i] : 0));
6294 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6295 write_op(tp, (cpu_scratch_base +
6296 (info->rodata_base & 0xffff) +
6297 (i * sizeof(u32))),
6298 (info->rodata_data ?
6299 info->rodata_data[i] : 0));
6300 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6301 write_op(tp, (cpu_scratch_base +
6302 (info->data_base & 0xffff) +
6303 (i * sizeof(u32))),
6304 (info->data_data ?
6305 info->data_data[i] : 0));
6306
6307 err = 0;
6308
6309out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006310 return err;
6311}
6312
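/* Load the workaround firmware above into both cpu scratch areas, then
 * release only the RX cpu and poll (up to five attempts) until its
 * program counter reaches TG3_FW_TEXT_ADDR.
 */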
6313/* tp->lock is held. */
6314static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6315{
6316 struct fw_info info;
6317 int err, i;
6318
6319 info.text_base = TG3_FW_TEXT_ADDR;
6320 info.text_len = TG3_FW_TEXT_LEN;
6321 info.text_data = &tg3FwText[0];
6322 info.rodata_base = TG3_FW_RODATA_ADDR;
6323 info.rodata_len = TG3_FW_RODATA_LEN;
6324 info.rodata_data = &tg3FwRodata[0];
6325 info.data_base = TG3_FW_DATA_ADDR;
6326 info.data_len = TG3_FW_DATA_LEN;
6327 info.data_data = NULL;
6328
6329 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6330 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6331 &info);
6332 if (err)
6333 return err;
6334
6335 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6336 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6337 &info);
6338 if (err)
6339 return err;
6340
6341 /* Now startup only the RX cpu. */
6342 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6343 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6344
6345 for (i = 0; i < 5; i++) {
6346 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6347 break;
6348 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6349 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6350 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6351 udelay(1000);
6352 }
6353 if (i >= 5) {
 6354		printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
 6355		       "to set RX CPU PC: is %08x, should be %08x\n",
6356 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6357 TG3_FW_TEXT_ADDR);
6358 return -ENODEV;
6359 }
6360 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6361 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6362
6363 return 0;
6364}
6365
Linus Torvalds1da177e2005-04-16 15:20:36 -07006366
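/* TSO firmware image for chips without hardware TSO support.
 * tg3_load_tso_firmware() runs this image on the TX cpu; the 5705
 * variant further down is run on the RX cpu instead.
 */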
6367#define TG3_TSO_FW_RELEASE_MAJOR 0x1
 6368#define TG3_TSO_FW_RELEASE_MINOR	0x6
6369#define TG3_TSO_FW_RELEASE_FIX 0x0
6370#define TG3_TSO_FW_START_ADDR 0x08000000
6371#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6372#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6373#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6374#define TG3_TSO_FW_RODATA_LEN 0x60
6375#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6376#define TG3_TSO_FW_DATA_LEN 0x30
6377#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6378#define TG3_TSO_FW_SBSS_LEN 0x2c
6379#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6380#define TG3_TSO_FW_BSS_LEN 0x894
6381
Andreas Mohr50da8592006-08-14 23:54:30 -07006382static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006383 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6384 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6385 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6386 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6387 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6388 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6389 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6390 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6391 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6392 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6393 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6394 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6395 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6396 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6397 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6398 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6399 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6400 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6401 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6402 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6403 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6404 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6405 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6406 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6407 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6408 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6409 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6410 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6411 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6412 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6413 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6414 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6415 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6416 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6417 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6418 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6419 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6420 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6421 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6422 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6423 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6424 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6425 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6426 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6427 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6428 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6429 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6430 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6431 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6432 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6433 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6434 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6435 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6436 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6437 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6438 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6439 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6440 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6441 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6442 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6443 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6444 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6445 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6446 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6447 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6448 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6449 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6450 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6451 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6452 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6453 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6454 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6455 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6456 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6457 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6458 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6459 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6460 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6461 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6462 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6463 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6464 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6465 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6466 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6467 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6468 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6469 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6470 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6471 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6472 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6473 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6474 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6475 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6476 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6477 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6478 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6479 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6480 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6481 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6482 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6483 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6484 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6485 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6486 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6487 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6488 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6489 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6490 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6491 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6492 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6493 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6494 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6495 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6496 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6497 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6498 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6499 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6500 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6501 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6502 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6503 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6504 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6505 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6506 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6507 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6508 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6509 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6510 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6511 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6512 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6513 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6514 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6515 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6516 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6517 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6518 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6519 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6520 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6521 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6522 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6523 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6524 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6525 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6526 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6527 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6528 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6529 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6530 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6531 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6532 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6533 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6534 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6535 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6536 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6537 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6538 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6539 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6540 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6541 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6542 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6543 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6544 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6545 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6546 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6547 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6548 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6549 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6550 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6551 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6552 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6553 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6554 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6555 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6556 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6557 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6558 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6559 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6560 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6561 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6562 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6563 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6564 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6565 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6566 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6567 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6568 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6569 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6570 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6571 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6572 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6573 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6574 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6575 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6576 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6577 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6578 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6579 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6580 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6581 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6582 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6583 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6584 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6585 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6586 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6587 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6588 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6589 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6590 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6591 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6592 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6593 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6594 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6595 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6596 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6597 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6598 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6599 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6600 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6601 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6602 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6603 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6604 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6605 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6606 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6607 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6608 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6609 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6610 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6611 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6612 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6613 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6614 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6615 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6616 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6617 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6618 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6619 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6620 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6621 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6622 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6623 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6624 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6625 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6626 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6627 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6628 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6629 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6630 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6631 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6632 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6633 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6634 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6635 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6636 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6637 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6638 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6639 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6640 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6641 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6642 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6643 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6644 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6645 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6646 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6647 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6648 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6649 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6650 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6651 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6652 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6653 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6654 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6655 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6656 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6657 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6658 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6659 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6660 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6661 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6662 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6663 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6664 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6665 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6666 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6667};
6668
Andreas Mohr50da8592006-08-14 23:54:30 -07006669static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006670 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6671 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6672 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6673 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6674 0x00000000,
6675};
6676
Andreas Mohr50da8592006-08-14 23:54:30 -07006677static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6679 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6680 0x00000000,
6681};
6682
6683/* 5705 needs a special version of the TSO firmware. */
6684#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
 6685#define TG3_TSO5_FW_RELEASE_MINOR	0x2
6686#define TG3_TSO5_FW_RELEASE_FIX 0x0
6687#define TG3_TSO5_FW_START_ADDR 0x00010000
6688#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6689#define TG3_TSO5_FW_TEXT_LEN 0xe90
6690#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6691#define TG3_TSO5_FW_RODATA_LEN 0x50
6692#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6693#define TG3_TSO5_FW_DATA_LEN 0x20
6694#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6695#define TG3_TSO5_FW_SBSS_LEN 0x28
6696#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6697#define TG3_TSO5_FW_BSS_LEN 0x88
6698
Andreas Mohr50da8592006-08-14 23:54:30 -07006699static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006700 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6701 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6702 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6703 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6704 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6705 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6706 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6707 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6708 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6709 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6710 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6711 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6712 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6713 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6714 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6715 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6716 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6717 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6718 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6719 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6720 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6721 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6722 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6723 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6724 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6725 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6726 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6727 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6728 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6729 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6730 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6731 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6732 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6733 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6734 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6735 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6736 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6737 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6738 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6739 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6740 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6741 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6742 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6743 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6744 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6745 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6746 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6747 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6748 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6749 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6750 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6751 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6752 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6753 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6754 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6755 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6756 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6757 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6758 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6759 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6760 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6761 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6762 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6763 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6764 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6765 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6766 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6767 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6768 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6769 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6770 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6771 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6772 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6773 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6774 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6775 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6776 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6777 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6778 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6779 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6780 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6781 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6782 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6783 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6784 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6785 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6786 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6787 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6788 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6789 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6790 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6791 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6792 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6793 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6794 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6795 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6796 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6797 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6798 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6799 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6800 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6801 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6802 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6803 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6804 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6805 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6806 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6807 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6808 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6809 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6810 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6811 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6812 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6813 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6814 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6815 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6816 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6817 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6818 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6819 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6820 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6821 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6822 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6823 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6824 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6825 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6826 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6827 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6828 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6829 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6830 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6831 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6832 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6833 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6834 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6835 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6836 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6837 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6838 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6839 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6840 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6841 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6842 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6843 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6844 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6845 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6846 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6847 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6848 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6849 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6850 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6851 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6852 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6853 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6854 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6855 0x00000000, 0x00000000, 0x00000000,
6856};
6857
Andreas Mohr50da8592006-08-14 23:54:30 -07006858static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006859 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6860 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6861 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6862 0x00000000, 0x00000000, 0x00000000,
6863};
6864
Andreas Mohr50da8592006-08-14 23:54:30 -07006865static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6867 0x00000000, 0x00000000, 0x00000000,
6868};
6869
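/* Load and start the TSO offload firmware.  Nothing to do when the
 * chip has hardware TSO.  The 5705 image is loaded into the RX cpu
 * using the NIC mbuf pool as scratch space; every other part loads the
 * regular image into the TX cpu scratch area.  After loading, the cpu
 * is released and polled until its PC reaches the image entry point.
 */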
6870/* tp->lock is held. */
6871static int tg3_load_tso_firmware(struct tg3 *tp)
6872{
6873 struct fw_info info;
6874 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6875 int err, i;
6876
6877 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6878 return 0;
6879
6880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6881 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6882 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6883 info.text_data = &tg3Tso5FwText[0];
6884 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6885 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6886 info.rodata_data = &tg3Tso5FwRodata[0];
6887 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6888 info.data_len = TG3_TSO5_FW_DATA_LEN;
6889 info.data_data = &tg3Tso5FwData[0];
6890 cpu_base = RX_CPU_BASE;
6891 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6892 cpu_scratch_size = (info.text_len +
6893 info.rodata_len +
6894 info.data_len +
6895 TG3_TSO5_FW_SBSS_LEN +
6896 TG3_TSO5_FW_BSS_LEN);
6897 } else {
6898 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6899 info.text_len = TG3_TSO_FW_TEXT_LEN;
6900 info.text_data = &tg3TsoFwText[0];
6901 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6902 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6903 info.rodata_data = &tg3TsoFwRodata[0];
6904 info.data_base = TG3_TSO_FW_DATA_ADDR;
6905 info.data_len = TG3_TSO_FW_DATA_LEN;
6906 info.data_data = &tg3TsoFwData[0];
6907 cpu_base = TX_CPU_BASE;
6908 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6909 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6910 }
6911
6912 err = tg3_load_firmware_cpu(tp, cpu_base,
6913 cpu_scratch_base, cpu_scratch_size,
6914 &info);
6915 if (err)
6916 return err;
6917
6918 /* Now startup the cpu. */
6919 tw32(cpu_base + CPU_STATE, 0xffffffff);
6920 tw32_f(cpu_base + CPU_PC, info.text_base);
6921
6922 for (i = 0; i < 5; i++) {
6923 if (tr32(cpu_base + CPU_PC) == info.text_base)
6924 break;
6925 tw32(cpu_base + CPU_STATE, 0xffffffff);
6926 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6927 tw32_f(cpu_base + CPU_PC, info.text_base);
6928 udelay(1000);
6929 }
6930 if (i >= 5) {
 6931		printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
 6932		       "to set CPU PC: is %08x, should be %08x\n",
6933 tp->dev->name, tr32(cpu_base + CPU_PC),
6934 info.text_base);
6935 return -ENODEV;
6936 }
6937 tw32(cpu_base + CPU_STATE, 0xffffffff);
6938 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6939 return 0;
6940}
6941
Linus Torvalds1da177e2005-04-16 15:20:36 -07006942
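/* set_mac_address handler: validate and record the new station
 * address, then reprogram the MAC address registers.  When ASF is
 * active, MAC address 1 is left untouched if the firmware has claimed
 * it for its own use.
 */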
Linus Torvalds1da177e2005-04-16 15:20:36 -07006943static int tg3_set_mac_addr(struct net_device *dev, void *p)
6944{
6945 struct tg3 *tp = netdev_priv(dev);
6946 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07006947 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006948
Michael Chanf9804dd2005-09-27 12:13:10 -07006949 if (!is_valid_ether_addr(addr->sa_data))
6950 return -EINVAL;
6951
Linus Torvalds1da177e2005-04-16 15:20:36 -07006952 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6953
Michael Chane75f7c92006-03-20 21:33:26 -08006954 if (!netif_running(dev))
6955 return 0;
6956
Michael Chan58712ef2006-04-29 18:58:01 -07006957 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006958 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07006959
Michael Chan986e0ae2007-05-05 12:10:20 -07006960 addr0_high = tr32(MAC_ADDR_0_HIGH);
6961 addr0_low = tr32(MAC_ADDR_0_LOW);
6962 addr1_high = tr32(MAC_ADDR_1_HIGH);
6963 addr1_low = tr32(MAC_ADDR_1_LOW);
6964
6965 /* Skip MAC addr 1 if ASF is using it. */
6966 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6967 !(addr1_high == 0 && addr1_low == 0))
6968 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07006969 }
Michael Chan986e0ae2007-05-05 12:10:20 -07006970 spin_lock_bh(&tp->lock);
6971 __tg3_set_mac_addr(tp, skip_mac_1);
6972 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006973
Michael Chanb9ec6c12006-07-25 16:37:27 -07006974 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006975}
6976
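/* Write one ring control block into NIC SRAM: the 64-bit host DMA
 * address, the maxlen/flags word and, on pre-5705 parts, the NIC-local
 * ring address.
 */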
6977/* tp->lock is held. */
6978static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6979 dma_addr_t mapping, u32 maxlen_flags,
6980 u32 nic_addr)
6981{
6982 tg3_write_mem(tp,
6983 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6984 ((u64) mapping >> 32));
6985 tg3_write_mem(tp,
6986 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6987 ((u64) mapping & 0xffffffff));
6988 tg3_write_mem(tp,
6989 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6990 maxlen_flags);
6991
6992 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6993 tg3_write_mem(tp,
6994 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6995 nic_addr);
6996}
6997
6998static void __tg3_set_rx_mode(struct net_device *);
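/* Program the host coalescing engine from an ethtool_coalesce request.
 * 5705+ parts lack the during-interrupt tick registers and the
 * statistics block coalescing register; the statistics ticks are
 * forced to zero while the link is down.
 */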
Michael Chand244c892005-07-05 14:42:33 -07006999static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07007000{
7001 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7002 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7003 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7004 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7005 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7006 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7007 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7008 }
7009 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7010 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7011 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7012 u32 val = ec->stats_block_coalesce_usecs;
7013
7014 if (!netif_carrier_ok(tp->dev))
7015 val = 0;
7016
7017 tw32(HOSTCC_STAT_COAL_TICKS, val);
7018 }
7019}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020
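/* Bring the chip from reset to a fully initialized state: stop the
 * management firmware, optionally reset the PHY, reset the chip, redo
 * the CPMU/clock/PCI workarounds and reinitialize the rings before the
 * remaining hardware blocks are programmed below.
 */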
7021/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007022static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007023{
7024 u32 val, rdmac_mode;
7025 int i, err, limit;
7026
7027 tg3_disable_ints(tp);
7028
7029 tg3_stop_fw(tp);
7030
7031 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7032
7033 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007034 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007035 }
7036
Matt Carlsondd477002008-05-25 23:45:58 -07007037 if (reset_phy &&
7038 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007039 tg3_phy_reset(tp);
7040
Linus Torvalds1da177e2005-04-16 15:20:36 -07007041 err = tg3_chip_reset(tp);
7042 if (err)
7043 return err;
7044
7045 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7046
Matt Carlsonb5af7122007-11-12 21:22:02 -08007047 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7048 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007049 val = tr32(TG3_CPMU_CTRL);
7050 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7051 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007052
7053 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7054 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7055 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7056 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7057
7058 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7059 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7060 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7061 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7062
7063 val = tr32(TG3_CPMU_HST_ACC);
7064 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7065 val |= CPMU_HST_ACC_MACCLK_6_25;
7066 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007067 }
7068
Linus Torvalds1da177e2005-04-16 15:20:36 -07007069 /* This works around an issue with Athlon chipsets on
7070 * B3 tigon3 silicon. This bit has no effect on any
7071 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007072 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007073 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007074 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7075 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7076 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7077 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7078 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007079
7080 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7081 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7082 val = tr32(TG3PCI_PCISTATE);
7083 val |= PCISTATE_RETRY_SAME_DMA;
7084 tw32(TG3PCI_PCISTATE, val);
7085 }
7086
Matt Carlson0d3031d2007-10-10 18:02:43 -07007087 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7088 /* Allow reads and writes to the
7089 * APE register and memory space.
7090 */
7091 val = tr32(TG3PCI_PCISTATE);
7092 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7093 PCISTATE_ALLOW_APE_SHMEM_WR;
7094 tw32(TG3PCI_PCISTATE, val);
7095 }
7096
Linus Torvalds1da177e2005-04-16 15:20:36 -07007097 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7098 /* Enable some hw fixes. */
7099 val = tr32(TG3PCI_MSI_DATA);
7100 val |= (1 << 26) | (1 << 28) | (1 << 29);
7101 tw32(TG3PCI_MSI_DATA, val);
7102 }
7103
7104 /* Descriptor ring init may make accesses to the
7105 * NIC SRAM area to set up the TX descriptors, so we
7106 * can only do this after the hardware has been
7107 * successfully reset.
7108 */
Michael Chan32d8c572006-07-25 16:38:29 -07007109 err = tg3_init_rings(tp);
7110 if (err)
7111 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007112
Matt Carlson9936bcf2007-10-10 18:03:07 -07007113 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlson57e69832008-05-25 23:48:31 -07007114 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7115 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007116 /* This value is determined during the probe time DMA
7117 * engine test, tg3_test_dma.
7118 */
7119 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7120 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007121
7122 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7123 GRC_MODE_4X_NIC_SEND_RINGS |
7124 GRC_MODE_NO_TX_PHDR_CSUM |
7125 GRC_MODE_NO_RX_PHDR_CSUM);
7126 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007127
7128 /* Pseudo-header checksum is done by hardware logic and not
7129 * the offload processors, so make the chip do the pseudo-
7130 * header checksums on receive. For transmit it is more
7131 * convenient to do the pseudo-header checksum in software
7132 * as Linux does that on transmit for us in all cases.
7133 */
7134 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135
7136 tw32(GRC_MODE,
7137 tp->grc_mode |
7138 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7139
7140 /* Set up the timer prescaler register. Clock is always 66 MHz. */
7141 val = tr32(GRC_MISC_CFG);
7142 val &= ~0xff;
7143 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7144 tw32(GRC_MISC_CFG, val);
7145
7146 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007147 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007148 /* Do nothing. */
7149 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7150 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7152 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7153 else
7154 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7155 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7156 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007158 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7159 int fw_len;
7160
7161 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7162 TG3_TSO5_FW_RODATA_LEN +
7163 TG3_TSO5_FW_DATA_LEN +
7164 TG3_TSO5_FW_SBSS_LEN +
7165 TG3_TSO5_FW_BSS_LEN);
7166 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7167 tw32(BUFMGR_MB_POOL_ADDR,
7168 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7169 tw32(BUFMGR_MB_POOL_SIZE,
7170 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7171 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007172
Michael Chan0f893dc2005-07-25 12:30:38 -07007173 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007174 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7175 tp->bufmgr_config.mbuf_read_dma_low_water);
7176 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7177 tp->bufmgr_config.mbuf_mac_rx_low_water);
7178 tw32(BUFMGR_MB_HIGH_WATER,
7179 tp->bufmgr_config.mbuf_high_water);
7180 } else {
7181 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7182 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7183 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7184 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7185 tw32(BUFMGR_MB_HIGH_WATER,
7186 tp->bufmgr_config.mbuf_high_water_jumbo);
7187 }
7188 tw32(BUFMGR_DMA_LOW_WATER,
7189 tp->bufmgr_config.dma_low_water);
7190 tw32(BUFMGR_DMA_HIGH_WATER,
7191 tp->bufmgr_config.dma_high_water);
7192
7193 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7194 for (i = 0; i < 2000; i++) {
7195 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7196 break;
7197 udelay(10);
7198 }
7199 if (i >= 2000) {
7200 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7201 tp->dev->name);
7202 return -ENODEV;
7203 }
7204
7205 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007206 val = tp->rx_pending / 8;
7207 if (val == 0)
7208 val = 1;
7209 else if (val > tp->rx_std_max_post)
7210 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007211 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7212 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7213 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7214
7215 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7216 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7217 }
Michael Chanf92905d2006-06-29 20:14:29 -07007218
7219 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007220
7221 /* Initialize TG3_BDINFOs at:
7222 * RCVDBDI_STD_BD: standard eth size rx ring
7223 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7224 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7225 *
7226 * like so:
7227 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7228 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7229 * ring attribute flags
7230 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7231 *
7232 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7233 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7234 *
7235 * The size of each ring is fixed in the firmware, but the location is
7236 * configurable.
7237 */
7238 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7239 ((u64) tp->rx_std_mapping >> 32));
7240 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7241 ((u64) tp->rx_std_mapping & 0xffffffff));
7242 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7243 NIC_SRAM_RX_BUFFER_DESC);
7244
7245 /* Don't even try to program the JUMBO/MINI buffer descriptor
7246 * configs on 5705.
7247 */
7248 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7249 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7250 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7251 } else {
7252 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7253 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7254
7255 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7256 BDINFO_FLAGS_DISABLED);
7257
7258 /* Setup replenish threshold. */
7259 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7260
Michael Chan0f893dc2005-07-25 12:30:38 -07007261 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007262 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7263 ((u64) tp->rx_jumbo_mapping >> 32));
7264 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7265 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7266 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7267 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7268 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7269 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7270 } else {
7271 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7272 BDINFO_FLAGS_DISABLED);
7273 }
7274
7275 }
7276
7277 /* There is only one send ring on 5705/5750, no need to explicitly
7278 * disable the others.
7279 */
7280 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7281 /* Clear out send RCB ring in SRAM. */
7282 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7283 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7284 BDINFO_FLAGS_DISABLED);
7285 }
7286
7287 tp->tx_prod = 0;
7288 tp->tx_cons = 0;
7289 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7290 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7291
7292 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7293 tp->tx_desc_mapping,
7294 (TG3_TX_RING_SIZE <<
7295 BDINFO_FLAGS_MAXLEN_SHIFT),
7296 NIC_SRAM_TX_BUFFER_DESC);
7297
7298 /* There is only one receive return ring on 5705/5750, no need
7299 * to explicitly disable the others.
7300 */
7301 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7302 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7303 i += TG3_BDINFO_SIZE) {
7304 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7305 BDINFO_FLAGS_DISABLED);
7306 }
7307 }
7308
7309 tp->rx_rcb_ptr = 0;
7310 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7311
7312 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7313 tp->rx_rcb_mapping,
7314 (TG3_RX_RCB_RING_SIZE(tp) <<
7315 BDINFO_FLAGS_MAXLEN_SHIFT),
7316 0);
7317
7318 tp->rx_std_ptr = tp->rx_pending;
7319 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7320 tp->rx_std_ptr);
7321
Michael Chan0f893dc2005-07-25 12:30:38 -07007322 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007323 tp->rx_jumbo_pending : 0;
7324 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7325 tp->rx_jumbo_ptr);
7326
7327 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007328 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007329
7330 /* MTU + ethernet header + FCS + optional VLAN tag */
7331 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
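	/* e.g. with the default 1500-byte MTU this programs
	 * 1500 + 14 (ETH_HLEN) + 4 (FCS) + 4 (VLAN tag) = 1522 bytes.
	 */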
7332
7333 /* The slot time is changed by tg3_setup_phy if we
7334 * run at gigabit with half duplex.
7335 */
7336 tw32(MAC_TX_LENGTHS,
7337 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7338 (6 << TX_LENGTHS_IPG_SHIFT) |
7339 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7340
7341 /* Receive rules. */
7342 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7343 tw32(RCVLPC_CONFIG, 0x0181);
7344
7345 /* Calculate the RDMAC_MODE setting early; we need it to determine
7346 * the RCVLPC_STATS_ENABLE mask.
7347 */
7348 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7349 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7350 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7351 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7352 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007353
Matt Carlson57e69832008-05-25 23:48:31 -07007354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007356 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7357 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7358 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7359
Michael Chan85e94ce2005-04-21 17:05:28 -07007360 /* If statement applies to 5705 and 5750 PCI devices only */
7361 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7362 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7363 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007364 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007366 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7367 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7368 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7369 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7370 }
7371 }
7372
Michael Chan85e94ce2005-04-21 17:05:28 -07007373 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7374 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7375
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7377 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007378
7379 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007380 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7381 val = tr32(RCVLPC_STATS_ENABLE);
7382 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7383 tw32(RCVLPC_STATS_ENABLE, val);
7384 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7385 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007386 val = tr32(RCVLPC_STATS_ENABLE);
7387 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7388 tw32(RCVLPC_STATS_ENABLE, val);
7389 } else {
7390 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7391 }
7392 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7393 tw32(SNDDATAI_STATSENAB, 0xffffff);
7394 tw32(SNDDATAI_STATSCTRL,
7395 (SNDDATAI_SCTRL_ENABLE |
7396 SNDDATAI_SCTRL_FASTUPD));
7397
7398 /* Setup host coalescing engine. */
7399 tw32(HOSTCC_MODE, 0);
7400 for (i = 0; i < 2000; i++) {
7401 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7402 break;
7403 udelay(10);
7404 }
7405
Michael Chand244c892005-07-05 14:42:33 -07007406 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007407
7408 /* set status block DMA address */
7409 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7410 ((u64) tp->status_mapping >> 32));
7411 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7412 ((u64) tp->status_mapping & 0xffffffff));
7413
7414 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7415 /* Status/statistics block address. See tg3_timer,
7416 * the tg3_periodic_fetch_stats call there, and
7417 * tg3_get_stats to see how this works for 5705/5750 chips.
7418 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007419 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7420 ((u64) tp->stats_mapping >> 32));
7421 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7422 ((u64) tp->stats_mapping & 0xffffffff));
7423 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7424 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7425 }
7426
7427 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7428
7429 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7430 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7431 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7432 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7433
7434 /* Clear statistics/status block in chip, and status block in ram. */
7435 for (i = NIC_SRAM_STATS_BLK;
7436 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7437 i += sizeof(u32)) {
7438 tg3_write_mem(tp, i, 0);
7439 udelay(40);
7440 }
7441 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7442
Michael Chanc94e3942005-09-27 12:12:42 -07007443 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7444 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7445 /* reset to prevent losing 1st rx packet intermittently */
7446 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7447 udelay(10);
7448 }
7449
Matt Carlson3bda1252008-08-15 14:08:22 -07007450 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7451 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7452 else
7453 tp->mac_mode = 0;
7454 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007455 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007456 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7457 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7458 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7459 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007460 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7461 udelay(40);
7462
Michael Chan314fba32005-04-21 17:07:04 -07007463 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007464 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007465 * register to preserve the GPIO settings for LOMs. The GPIOs,
7466 * whether used as inputs or outputs, are set by boot code after
7467 * reset.
7468 */
Michael Chan9d26e212006-12-07 00:21:14 -08007469 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007470 u32 gpio_mask;
7471
Michael Chan9d26e212006-12-07 00:21:14 -08007472 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7473 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7474 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007475
7476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7477 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7478 GRC_LCLCTRL_GPIO_OUTPUT3;
7479
Michael Chanaf36e6b2006-03-23 01:28:06 -08007480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7481 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7482
Gary Zambranoaaf84462007-05-05 11:51:45 -07007483 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007484 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7485
7486 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007487 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7488 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7489 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007490 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007491 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7492 udelay(100);
7493
Michael Chan09ee9292005-08-09 20:17:00 -07007494 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007495 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007496
7497 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7498 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7499 udelay(40);
7500 }
7501
7502 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7503 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7504 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7505 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7506 WDMAC_MODE_LNGREAD_ENAB);
7507
Michael Chan85e94ce2005-04-21 17:05:28 -07007508 /* If statement applies to 5705 and 5750 PCI devices only */
7509 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7510 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007512 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7513 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7514 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7515 /* nothing */
7516 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7517 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7518 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7519 val |= WDMAC_MODE_RX_ACCEL;
7520 }
7521 }
7522
Michael Chand9ab5ad2006-03-20 22:27:35 -08007523 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007524 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007525 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007526 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007527 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7528 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007529 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007530
Linus Torvalds1da177e2005-04-16 15:20:36 -07007531 tw32_f(WDMAC_MODE, val);
7532 udelay(40);
7533
Matt Carlson9974a352007-10-07 23:27:28 -07007534 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7535 u16 pcix_cmd;
7536
7537 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7538 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007539 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007540 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7541 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007543 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7544 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545 }
Matt Carlson9974a352007-10-07 23:27:28 -07007546 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7547 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007548 }
7549
7550 tw32_f(RDMAC_MODE, rdmac_mode);
7551 udelay(40);
7552
7553 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7554 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7555 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007556
7557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7558 tw32(SNDDATAC_MODE,
7559 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7560 else
7561 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7562
Linus Torvalds1da177e2005-04-16 15:20:36 -07007563 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7564 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7565 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7566 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007567 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7568 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007569 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7570 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7571
7572 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7573 err = tg3_load_5701_a0_firmware_fix(tp);
7574 if (err)
7575 return err;
7576 }
7577
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7579 err = tg3_load_tso_firmware(tp);
7580 if (err)
7581 return err;
7582 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007583
7584 tp->tx_mode = TX_MODE_ENABLE;
7585 tw32_f(MAC_TX_MODE, tp->tx_mode);
7586 udelay(100);
7587
7588 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007593 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7594
Linus Torvalds1da177e2005-04-16 15:20:36 -07007595 tw32_f(MAC_RX_MODE, tp->rx_mode);
7596 udelay(10);
7597
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 tw32(MAC_LED_CTRL, tp->led_ctrl);
7599
7600 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007601 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007602 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7603 udelay(10);
7604 }
7605 tw32_f(MAC_RX_MODE, tp->rx_mode);
7606 udelay(10);
7607
7608 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7609 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7610 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7611 /* Set drive transmission level to 1.2V */
7612 /* only if the signal pre-emphasis bit is not set */
7613 val = tr32(MAC_SERDES_CFG);
7614 val &= 0xfffff000;
7615 val |= 0x880;
7616 tw32(MAC_SERDES_CFG, val);
7617 }
7618 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7619 tw32(MAC_SERDES_CFG, 0x616000);
7620 }
7621
7622 /* Prevent chip from dropping frames when flow control
7623 * is enabled.
7624 */
7625 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7626
7627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7628 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7629 /* Use hardware link auto-negotiation */
7630 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7631 }
7632
Michael Chand4d2c552006-03-20 17:47:20 -08007633 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7634 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7635 u32 tmp;
7636
7637 tmp = tr32(SERDES_RX_CTRL);
7638 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7639 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7640 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7641 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7642 }
7643
Matt Carlsondd477002008-05-25 23:45:58 -07007644 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7645 if (tp->link_config.phy_is_low_power) {
7646 tp->link_config.phy_is_low_power = 0;
7647 tp->link_config.speed = tp->link_config.orig_speed;
7648 tp->link_config.duplex = tp->link_config.orig_duplex;
7649 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7650 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007651
Matt Carlsondd477002008-05-25 23:45:58 -07007652 err = tg3_setup_phy(tp, 0);
7653 if (err)
7654 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007655
Matt Carlsondd477002008-05-25 23:45:58 -07007656 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7657 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7658 u32 tmp;
7659
7660 /* Clear CRC stats. */
7661 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7662 tg3_writephy(tp, MII_TG3_TEST1,
7663 tmp | MII_TG3_TEST1_CRC_EN);
7664 tg3_readphy(tp, 0x14, &tmp);
7665 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007666 }
7667 }
7668
7669 __tg3_set_rx_mode(tp->dev);
7670
7671 /* Initialize receive rules. */
7672 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7673 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7674 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7675 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7676
Michael Chan4cf78e42005-07-25 12:29:19 -07007677 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007678 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007679 limit = 8;
7680 else
7681 limit = 16;
7682 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7683 limit -= 4;
7684 switch (limit) {
7685 case 16:
7686 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7687 case 15:
7688 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7689 case 14:
7690 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7691 case 13:
7692 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7693 case 12:
7694 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7695 case 11:
7696 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7697 case 10:
7698 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7699 case 9:
7700 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7701 case 8:
7702 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7703 case 7:
7704 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7705 case 6:
7706 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7707 case 5:
7708 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7709 case 4:
7710 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7711 case 3:
7712 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7713 case 2:
7714 case 1:
7715
7716 default:
7717 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007718 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007719
Matt Carlson9ce768e2007-10-11 19:49:11 -07007720 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7721 /* Write our heartbeat update interval to APE. */
7722 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7723 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007724
Linus Torvalds1da177e2005-04-16 15:20:36 -07007725 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7726
Linus Torvalds1da177e2005-04-16 15:20:36 -07007727 return 0;
7728}
7729
7730/* Called at device open time to get the chip ready for
7731 * packet processing. Invoked with tp->lock held.
7732 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007733static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007734{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007735 tg3_switch_clocks(tp);
7736
7737 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7738
Matt Carlson2f751b62008-08-04 23:17:34 -07007739 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007740}
7741
7742#define TG3_STAT_ADD32(PSTAT, REG) \
7743do { u32 __val = tr32(REG); \
7744 (PSTAT)->low += __val; \
7745 if ((PSTAT)->low < __val) \
7746 (PSTAT)->high += 1; \
7747} while (0)
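/* The low/high comparison above is the 32-to-64-bit carry: each periodic
 * fetch adds a 32-bit register snapshot into a 64-bit tg3_stat64_t, and
 * when the 32-bit low word wraps the sum is smaller than the value just
 * added.  Illustrative numbers: low = 0xffffff00 plus __val = 0x200 wraps
 * to 0x00000100, which is < 0x200, so high is incremented and the running
 * total 0x1_00000100 is preserved.
 */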
7748
7749static void tg3_periodic_fetch_stats(struct tg3 *tp)
7750{
7751 struct tg3_hw_stats *sp = tp->hw_stats;
7752
7753 if (!netif_carrier_ok(tp->dev))
7754 return;
7755
7756 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7757 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7758 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7759 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7760 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7761 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7762 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7763 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7764 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7765 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7766 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7767 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7768 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7769
7770 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7771 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7772 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7773 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7774 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7775 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7776 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7777 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7778 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7779 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7780 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7781 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7782 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7783 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007784
7785 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7786 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7787 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007788}
7789
7790static void tg3_timer(unsigned long __opaque)
7791{
7792 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007793
Michael Chanf475f162006-03-27 23:20:14 -08007794 if (tp->irq_sync)
7795 goto restart_timer;
7796
David S. Millerf47c11e2005-06-24 20:18:35 -07007797 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007798
David S. Millerfac9b832005-05-18 22:46:34 -07007799 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7800 /* All of this garbage is because, when using non-tagged
7801 * IRQ status, the mailbox/status_block protocol the chip
7802 * uses with the CPU is race prone.
7803 */
7804 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7805 tw32(GRC_LOCAL_CTRL,
7806 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7807 } else {
7808 tw32(HOSTCC_MODE, tp->coalesce_mode |
7809 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7810 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007811
David S. Millerfac9b832005-05-18 22:46:34 -07007812 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7813 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007814 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007815 schedule_work(&tp->reset_task);
7816 return;
7817 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007818 }
7819
Linus Torvalds1da177e2005-04-16 15:20:36 -07007820 /* This part only runs once per second. */
7821 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007822 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7823 tg3_periodic_fetch_stats(tp);
7824
Linus Torvalds1da177e2005-04-16 15:20:36 -07007825 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7826 u32 mac_stat;
7827 int phy_event;
7828
7829 mac_stat = tr32(MAC_STATUS);
7830
7831 phy_event = 0;
7832 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7833 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7834 phy_event = 1;
7835 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7836 phy_event = 1;
7837
7838 if (phy_event)
7839 tg3_setup_phy(tp, 0);
7840 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7841 u32 mac_stat = tr32(MAC_STATUS);
7842 int need_setup = 0;
7843
7844 if (netif_carrier_ok(tp->dev) &&
7845 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7846 need_setup = 1;
7847 }
7848 if (!netif_carrier_ok(tp->dev) &&
7849 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7850 MAC_STATUS_SIGNAL_DET))) {
7851 need_setup = 1;
7852 }
7853 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007854 if (!tp->serdes_counter) {
7855 tw32_f(MAC_MODE,
7856 (tp->mac_mode &
7857 ~MAC_MODE_PORT_MODE_MASK));
7858 udelay(40);
7859 tw32_f(MAC_MODE, tp->mac_mode);
7860 udelay(40);
7861 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007862 tg3_setup_phy(tp, 0);
7863 }
Michael Chan747e8f82005-07-25 12:33:22 -07007864 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7865 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007866
7867 tp->timer_counter = tp->timer_multiplier;
7868 }
7869
Michael Chan130b8e42006-09-27 16:00:40 -07007870 /* Heartbeat is only sent once every 2 seconds.
7871 *
7872 * The heartbeat is to tell the ASF firmware that the host
7873 * driver is still alive. In the event that the OS crashes,
7874 * ASF needs to reset the hardware to free up the FIFO space
7875 * that may be filled with rx packets destined for the host.
7876 * If the FIFO is full, ASF will no longer function properly.
7877 *
7878 * Unintended resets have been reported on real time kernels
7879 * where the timer doesn't run on time. Netpoll will also have
7880 * same problem.
7881 *
7882 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7883 * to check the ring condition when the heartbeat is expiring
7884 * before doing the reset. This will prevent most unintended
7885 * resets.
7886 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007887 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007888 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7889 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007890 tg3_wait_for_event_ack(tp);
7891
Michael Chanbbadf502006-04-06 21:46:34 -07007892 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007893 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007894 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007895 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007896 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007897
7898 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007899 }
7900 tp->asf_counter = tp->asf_multiplier;
7901 }
7902
David S. Millerf47c11e2005-06-24 20:18:35 -07007903 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007904
Michael Chanf475f162006-03-27 23:20:14 -08007905restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007906 tp->timer.expires = jiffies + tp->timer_offset;
7907 add_timer(&tp->timer);
7908}
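/* Cadence sketch, using the values that tg3_open() programs further down:
 * with TG3_FLAG_TAGGED_STATUS, timer_offset = HZ, so this timer fires once
 * per second and timer_multiplier = 1; otherwise it fires every HZ/10
 * jiffies with timer_multiplier = 10.  Either way the "once per second"
 * block above runs at 1 Hz, and asf_multiplier = 2 * timer_multiplier
 * keeps the FWCMD_NICDRV_ALIVE3 heartbeat at one message every 2 seconds.
 */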
7909
Adrian Bunk81789ef2006-03-20 23:00:14 -08007910static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007911{
David Howells7d12e782006-10-05 14:55:46 +01007912 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007913 unsigned long flags;
7914 struct net_device *dev = tp->dev;
7915
7916 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7917 fn = tg3_msi;
7918 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7919 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007920 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007921 } else {
7922 fn = tg3_interrupt;
7923 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7924 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007925 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007926 }
7927 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7928}
7929
Michael Chan79381092005-04-21 17:13:59 -07007930static int tg3_test_interrupt(struct tg3 *tp)
7931{
7932 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07007933 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07007934
Michael Chand4bc3922005-05-29 14:59:20 -07007935 if (!netif_running(dev))
7936 return -ENODEV;
7937
Michael Chan79381092005-04-21 17:13:59 -07007938 tg3_disable_ints(tp);
7939
7940 free_irq(tp->pdev->irq, dev);
7941
7942 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007943 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07007944 if (err)
7945 return err;
7946
Michael Chan38f38432005-09-05 17:53:32 -07007947 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07007948 tg3_enable_ints(tp);
7949
7950 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7951 HOSTCC_MODE_NOW);
7952
7953 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07007954 u32 int_mbox, misc_host_ctrl;
7955
Michael Chan09ee9292005-08-09 20:17:00 -07007956 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7957 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07007958 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7959
7960 if ((int_mbox != 0) ||
7961 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7962 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07007963 break;
Michael Chanb16250e2006-09-27 16:10:14 -07007964 }
7965
Michael Chan79381092005-04-21 17:13:59 -07007966 msleep(10);
7967 }
7968
7969 tg3_disable_ints(tp);
7970
7971 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04007972
Michael Chanfcfa0a32006-03-20 22:28:41 -08007973 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007974
7975 if (err)
7976 return err;
7977
Michael Chanb16250e2006-09-27 16:10:14 -07007978 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07007979 return 0;
7980
7981 return -EIO;
7982}
7983
7984/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
7985 * successfully restored.
7986 */
7987static int tg3_test_msi(struct tg3 *tp)
7988{
7989 struct net_device *dev = tp->dev;
7990 int err;
7991 u16 pci_cmd;
7992
7993 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7994 return 0;
7995
7996 /* Turn off SERR reporting in case MSI terminates with Master
7997 * Abort.
7998 */
7999 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8000 pci_write_config_word(tp->pdev, PCI_COMMAND,
8001 pci_cmd & ~PCI_COMMAND_SERR);
8002
8003 err = tg3_test_interrupt(tp);
8004
8005 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8006
8007 if (!err)
8008 return 0;
8009
8010 /* other failures */
8011 if (err != -EIO)
8012 return err;
8013
8014 /* MSI test failed, go back to INTx mode */
8015 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8016 "switching to INTx mode. Please report this failure to "
8017 "the PCI maintainer and include system chipset information.\n",
8018 tp->dev->name);
8019
8020 free_irq(tp->pdev->irq, dev);
8021 pci_disable_msi(tp->pdev);
8022
8023 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8024
Michael Chanfcfa0a32006-03-20 22:28:41 -08008025 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008026 if (err)
8027 return err;
8028
8029 /* Need to reset the chip because the MSI cycle may have terminated
8030 * with Master Abort.
8031 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008032 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008033
Michael Chan944d9802005-05-29 14:57:48 -07008034 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008035 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008036
David S. Millerf47c11e2005-06-24 20:18:35 -07008037 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008038
8039 if (err)
8040 free_irq(tp->pdev->irq, dev);
8041
8042 return err;
8043}
8044
Linus Torvalds1da177e2005-04-16 15:20:36 -07008045static int tg3_open(struct net_device *dev)
8046{
8047 struct tg3 *tp = netdev_priv(dev);
8048 int err;
8049
Michael Chanc49a1562006-12-17 17:07:29 -08008050 netif_carrier_off(tp->dev);
8051
Michael Chanbc1c7562006-03-20 17:48:03 -08008052 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008053 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008054 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008055
8056 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008057
Linus Torvalds1da177e2005-04-16 15:20:36 -07008058 tg3_disable_ints(tp);
8059 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8060
David S. Millerf47c11e2005-06-24 20:18:35 -07008061 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008062
8063 /* The placement of this call is tied
8064 * to the setup and use of Host TX descriptors.
8065 */
8066 err = tg3_alloc_consistent(tp);
8067 if (err)
8068 return err;
8069
Michael Chan7544b092007-05-05 13:08:32 -07008070 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008071 /* All MSI supporting chips should support tagged
8072 * status. Assert that this is the case.
8073 */
8074 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8075 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8076 "Not using MSI.\n", tp->dev->name);
8077 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008078 u32 msi_mode;
8079
8080 msi_mode = tr32(MSGINT_MODE);
8081 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8082 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8083 }
8084 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008085 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008086
8087 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008088 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8089 pci_disable_msi(tp->pdev);
8090 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008092 tg3_free_consistent(tp);
8093 return err;
8094 }
8095
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008096 napi_enable(&tp->napi);
8097
David S. Millerf47c11e2005-06-24 20:18:35 -07008098 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008099
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008100 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008101 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008102 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008103 tg3_free_rings(tp);
8104 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008105 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8106 tp->timer_offset = HZ;
8107 else
8108 tp->timer_offset = HZ / 10;
8109
8110 BUG_ON(tp->timer_offset > HZ);
8111 tp->timer_counter = tp->timer_multiplier =
8112 (HZ / tp->timer_offset);
8113 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008114 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008115
8116 init_timer(&tp->timer);
8117 tp->timer.expires = jiffies + tp->timer_offset;
8118 tp->timer.data = (unsigned long) tp;
8119 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008120 }
8121
David S. Millerf47c11e2005-06-24 20:18:35 -07008122 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008123
8124 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008125 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008126 free_irq(tp->pdev->irq, dev);
8127 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8128 pci_disable_msi(tp->pdev);
8129 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8130 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008131 tg3_free_consistent(tp);
8132 return err;
8133 }
8134
Michael Chan79381092005-04-21 17:13:59 -07008135 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8136 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008137
Michael Chan79381092005-04-21 17:13:59 -07008138 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008139 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008140
8141 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8142 pci_disable_msi(tp->pdev);
8143 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8144 }
Michael Chan944d9802005-05-29 14:57:48 -07008145 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008146 tg3_free_rings(tp);
8147 tg3_free_consistent(tp);
8148
David S. Millerf47c11e2005-06-24 20:18:35 -07008149 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008150
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008151 napi_disable(&tp->napi);
8152
Michael Chan79381092005-04-21 17:13:59 -07008153 return err;
8154 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008155
8156 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8157 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008158 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008159
Michael Chanb5d37722006-09-27 16:06:21 -07008160 tw32(PCIE_TRANSACTION_CFG,
8161 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008162 }
8163 }
Michael Chan79381092005-04-21 17:13:59 -07008164 }
8165
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008166 tg3_phy_start(tp);
8167
David S. Millerf47c11e2005-06-24 20:18:35 -07008168 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008169
Michael Chan79381092005-04-21 17:13:59 -07008170 add_timer(&tp->timer);
8171 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008172 tg3_enable_ints(tp);
8173
David S. Millerf47c11e2005-06-24 20:18:35 -07008174 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008175
8176 netif_start_queue(dev);
8177
8178 return 0;
8179}
8180
8181#if 0
8182/*static*/ void tg3_dump_state(struct tg3 *tp)
8183{
8184 u32 val32, val32_2, val32_3, val32_4, val32_5;
8185 u16 val16;
8186 int i;
8187
8188 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8189 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8190 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8191 val16, val32);
8192
8193 /* MAC block */
8194 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8195 tr32(MAC_MODE), tr32(MAC_STATUS));
8196 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8197 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8198 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8199 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8200 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8201 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8202
8203 /* Send data initiator control block */
8204 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8205 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8206 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8207 tr32(SNDDATAI_STATSCTRL));
8208
8209 /* Send data completion control block */
8210 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8211
8212 /* Send BD ring selector block */
8213 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8214 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8215
8216 /* Send BD initiator control block */
8217 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8218 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8219
8220 /* Send BD completion control block */
8221 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8222
8223 /* Receive list placement control block */
8224 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8225 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8226 printk(" RCVLPC_STATSCTRL[%08x]\n",
8227 tr32(RCVLPC_STATSCTRL));
8228
8229 /* Receive data and receive BD initiator control block */
8230 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8231 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8232
8233 /* Receive data completion control block */
8234 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8235 tr32(RCVDCC_MODE));
8236
8237 /* Receive BD initiator control block */
8238 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8239 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8240
8241 /* Receive BD completion control block */
8242 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8243 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8244
8245 /* Receive list selector control block */
8246 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8247 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8248
8249 /* Mbuf cluster free block */
8250 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8251 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8252
8253 /* Host coalescing control block */
8254 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8255 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8256 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8257 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8258 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8259 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8260 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8261 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8262 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8263 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8264 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8265 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8266
8267 /* Memory arbiter control block */
8268 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8269 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8270
8271 /* Buffer manager control block */
8272 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8273 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8274 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8275 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8276 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8277 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8278 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8279 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8280
8281 /* Read DMA control block */
8282 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8283 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8284
8285 /* Write DMA control block */
8286 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8287 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8288
8289 /* DMA completion block */
8290 printk("DEBUG: DMAC_MODE[%08x]\n",
8291 tr32(DMAC_MODE));
8292
8293 /* GRC block */
8294 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8295 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8296 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8297 tr32(GRC_LOCAL_CTRL));
8298
8299 /* TG3_BDINFOs */
8300 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8301 tr32(RCVDBDI_JUMBO_BD + 0x0),
8302 tr32(RCVDBDI_JUMBO_BD + 0x4),
8303 tr32(RCVDBDI_JUMBO_BD + 0x8),
8304 tr32(RCVDBDI_JUMBO_BD + 0xc));
8305 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8306 tr32(RCVDBDI_STD_BD + 0x0),
8307 tr32(RCVDBDI_STD_BD + 0x4),
8308 tr32(RCVDBDI_STD_BD + 0x8),
8309 tr32(RCVDBDI_STD_BD + 0xc));
8310 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8311 tr32(RCVDBDI_MINI_BD + 0x0),
8312 tr32(RCVDBDI_MINI_BD + 0x4),
8313 tr32(RCVDBDI_MINI_BD + 0x8),
8314 tr32(RCVDBDI_MINI_BD + 0xc));
8315
8316 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8317 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8318 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8319 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8320 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8321 val32, val32_2, val32_3, val32_4);
8322
8323 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8324 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8325 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8326 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8327 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8328 val32, val32_2, val32_3, val32_4);
8329
8330 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8331 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8332 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8333 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8334 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8335 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8336 val32, val32_2, val32_3, val32_4, val32_5);
8337
8338 /* SW status block */
8339 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8340 tp->hw_status->status,
8341 tp->hw_status->status_tag,
8342 tp->hw_status->rx_jumbo_consumer,
8343 tp->hw_status->rx_consumer,
8344 tp->hw_status->rx_mini_consumer,
8345 tp->hw_status->idx[0].rx_producer,
8346 tp->hw_status->idx[0].tx_consumer);
8347
8348 /* SW statistics block */
8349 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8350 ((u32 *)tp->hw_stats)[0],
8351 ((u32 *)tp->hw_stats)[1],
8352 ((u32 *)tp->hw_stats)[2],
8353 ((u32 *)tp->hw_stats)[3]);
8354
8355 /* Mailboxes */
8356 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008357 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8358 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8359 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8360 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008361
8362 /* NIC side send descriptors. */
8363 for (i = 0; i < 6; i++) {
8364 unsigned long txd;
8365
8366 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8367 + (i * sizeof(struct tg3_tx_buffer_desc));
8368 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8369 i,
8370 readl(txd + 0x0), readl(txd + 0x4),
8371 readl(txd + 0x8), readl(txd + 0xc));
8372 }
8373
8374 /* NIC side RX descriptors. */
8375 for (i = 0; i < 6; i++) {
8376 unsigned long rxd;
8377
8378 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8379 + (i * sizeof(struct tg3_rx_buffer_desc));
8380 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8381 i,
8382 readl(rxd + 0x0), readl(rxd + 0x4),
8383 readl(rxd + 0x8), readl(rxd + 0xc));
8384 rxd += (4 * sizeof(u32));
8385 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8386 i,
8387 readl(rxd + 0x0), readl(rxd + 0x4),
8388 readl(rxd + 0x8), readl(rxd + 0xc));
8389 }
8390
8391 for (i = 0; i < 6; i++) {
8392 unsigned long rxd;
8393
8394 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8395 + (i * sizeof(struct tg3_rx_buffer_desc));
8396 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8397 i,
8398 readl(rxd + 0x0), readl(rxd + 0x4),
8399 readl(rxd + 0x8), readl(rxd + 0xc));
8400 rxd += (4 * sizeof(u32));
8401 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8402 i,
8403 readl(rxd + 0x0), readl(rxd + 0x4),
8404 readl(rxd + 0x8), readl(rxd + 0xc));
8405 }
8406}
8407#endif
8408
8409static struct net_device_stats *tg3_get_stats(struct net_device *);
8410static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8411
8412static int tg3_close(struct net_device *dev)
8413{
8414 struct tg3 *tp = netdev_priv(dev);
8415
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008416 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008417 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008418
Linus Torvalds1da177e2005-04-16 15:20:36 -07008419 netif_stop_queue(dev);
8420
8421 del_timer_sync(&tp->timer);
8422
David S. Millerf47c11e2005-06-24 20:18:35 -07008423 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008424#if 0
8425 tg3_dump_state(tp);
8426#endif
8427
8428 tg3_disable_ints(tp);
8429
Michael Chan944d9802005-05-29 14:57:48 -07008430 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008431 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008432 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008433
David S. Millerf47c11e2005-06-24 20:18:35 -07008434 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008435
Michael Chan88b06bc2005-04-21 17:13:25 -07008436 free_irq(tp->pdev->irq, dev);
8437 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8438 pci_disable_msi(tp->pdev);
8439 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008441
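	/* Snapshot the running totals before tg3_free_consistent() below
	 * releases the hardware statistics block.  tg3_get_stats() and
	 * tg3_get_estats() report these saved values plus whatever the
	 * hardware counters accumulate after the device is reopened.
	 */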
8442 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8443 sizeof(tp->net_stats_prev));
8444 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8445 sizeof(tp->estats_prev));
8446
8447 tg3_free_consistent(tp);
8448
Michael Chanbc1c7562006-03-20 17:48:03 -08008449 tg3_set_power_state(tp, PCI_D3hot);
8450
8451 netif_carrier_off(tp->dev);
8452
Linus Torvalds1da177e2005-04-16 15:20:36 -07008453 return 0;
8454}
8455
8456static inline unsigned long get_stat64(tg3_stat64_t *val)
8457{
8458 unsigned long ret;
8459
8460#if (BITS_PER_LONG == 32)
8461 ret = val->low;
8462#else
8463 ret = ((u64)val->high << 32) | ((u64)val->low);
8464#endif
8465 return ret;
8466}
8467
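/* Hardware counters are kept as {high,low} 32-bit word pairs (tg3_stat64_t).
 * get_stat64() above folds them into an unsigned long, so 32-bit hosts only
 * report the low word (matching struct net_device_stats), while get_estat64()
 * below always returns the full 64-bit value for the ethtool statistics.
 */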
Stefan Buehler816f8b82008-08-15 14:10:54 -07008468static inline u64 get_estat64(tg3_stat64_t *val)
8469{
8470 return ((u64)val->high << 32) | ((u64)val->low);
8471}
8472
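/* On 5700/5701 with a copper PHY the MAC's FCS statistic is not used; instead
 * the PHY's own CRC error counter is enabled (MII_TG3_TEST1_CRC_EN), read
 * from PHY register 0x14 and accumulated in software in tp->phy_crc_errors.
 * All other configurations report the MAC statistic rx_fcs_errors directly.
 */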
Linus Torvalds1da177e2005-04-16 15:20:36 -07008473static unsigned long calc_crc_errors(struct tg3 *tp)
8474{
8475 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8476
8477 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8478 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008480 u32 val;
8481
David S. Millerf47c11e2005-06-24 20:18:35 -07008482 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008483 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8484 tg3_writephy(tp, MII_TG3_TEST1,
8485 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008486 tg3_readphy(tp, 0x14, &val);
8487 } else
8488 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008489 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008490
8491 tp->phy_crc_errors += val;
8492
8493 return tp->phy_crc_errors;
8494 }
8495
8496 return get_stat64(&hw_stats->rx_fcs_errors);
8497}
8498
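/* ESTAT_ADD(x) adds the current 64-bit hardware counter for x to the total
 * snapshotted into estats_prev at the last close, so the ethtool statistics
 * keep counting across chip resets.
 */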
8499#define ESTAT_ADD(member) \
8500 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008501 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008502
8503static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8504{
8505 struct tg3_ethtool_stats *estats = &tp->estats;
8506 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8507 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8508
8509 if (!hw_stats)
8510 return old_estats;
8511
8512 ESTAT_ADD(rx_octets);
8513 ESTAT_ADD(rx_fragments);
8514 ESTAT_ADD(rx_ucast_packets);
8515 ESTAT_ADD(rx_mcast_packets);
8516 ESTAT_ADD(rx_bcast_packets);
8517 ESTAT_ADD(rx_fcs_errors);
8518 ESTAT_ADD(rx_align_errors);
8519 ESTAT_ADD(rx_xon_pause_rcvd);
8520 ESTAT_ADD(rx_xoff_pause_rcvd);
8521 ESTAT_ADD(rx_mac_ctrl_rcvd);
8522 ESTAT_ADD(rx_xoff_entered);
8523 ESTAT_ADD(rx_frame_too_long_errors);
8524 ESTAT_ADD(rx_jabbers);
8525 ESTAT_ADD(rx_undersize_packets);
8526 ESTAT_ADD(rx_in_length_errors);
8527 ESTAT_ADD(rx_out_length_errors);
8528 ESTAT_ADD(rx_64_or_less_octet_packets);
8529 ESTAT_ADD(rx_65_to_127_octet_packets);
8530 ESTAT_ADD(rx_128_to_255_octet_packets);
8531 ESTAT_ADD(rx_256_to_511_octet_packets);
8532 ESTAT_ADD(rx_512_to_1023_octet_packets);
8533 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8534 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8535 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8536 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8537 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8538
8539 ESTAT_ADD(tx_octets);
8540 ESTAT_ADD(tx_collisions);
8541 ESTAT_ADD(tx_xon_sent);
8542 ESTAT_ADD(tx_xoff_sent);
8543 ESTAT_ADD(tx_flow_control);
8544 ESTAT_ADD(tx_mac_errors);
8545 ESTAT_ADD(tx_single_collisions);
8546 ESTAT_ADD(tx_mult_collisions);
8547 ESTAT_ADD(tx_deferred);
8548 ESTAT_ADD(tx_excessive_collisions);
8549 ESTAT_ADD(tx_late_collisions);
8550 ESTAT_ADD(tx_collide_2times);
8551 ESTAT_ADD(tx_collide_3times);
8552 ESTAT_ADD(tx_collide_4times);
8553 ESTAT_ADD(tx_collide_5times);
8554 ESTAT_ADD(tx_collide_6times);
8555 ESTAT_ADD(tx_collide_7times);
8556 ESTAT_ADD(tx_collide_8times);
8557 ESTAT_ADD(tx_collide_9times);
8558 ESTAT_ADD(tx_collide_10times);
8559 ESTAT_ADD(tx_collide_11times);
8560 ESTAT_ADD(tx_collide_12times);
8561 ESTAT_ADD(tx_collide_13times);
8562 ESTAT_ADD(tx_collide_14times);
8563 ESTAT_ADD(tx_collide_15times);
8564 ESTAT_ADD(tx_ucast_packets);
8565 ESTAT_ADD(tx_mcast_packets);
8566 ESTAT_ADD(tx_bcast_packets);
8567 ESTAT_ADD(tx_carrier_sense_errors);
8568 ESTAT_ADD(tx_discards);
8569 ESTAT_ADD(tx_errors);
8570
8571 ESTAT_ADD(dma_writeq_full);
8572 ESTAT_ADD(dma_write_prioq_full);
8573 ESTAT_ADD(rxbds_empty);
8574 ESTAT_ADD(rx_discards);
8575 ESTAT_ADD(rx_errors);
8576 ESTAT_ADD(rx_threshold_hit);
8577
8578 ESTAT_ADD(dma_readq_full);
8579 ESTAT_ADD(dma_read_prioq_full);
8580 ESTAT_ADD(tx_comp_queue_full);
8581
8582 ESTAT_ADD(ring_set_send_prod_index);
8583 ESTAT_ADD(ring_status_update);
8584 ESTAT_ADD(nic_irqs);
8585 ESTAT_ADD(nic_avoided_irqs);
8586 ESTAT_ADD(nic_tx_threshold_hit);
8587
8588 return estats;
8589}
8590
8591static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8592{
8593 struct tg3 *tp = netdev_priv(dev);
8594 struct net_device_stats *stats = &tp->net_stats;
8595 struct net_device_stats *old_stats = &tp->net_stats_prev;
8596 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8597
8598 if (!hw_stats)
8599 return old_stats;
8600
8601 stats->rx_packets = old_stats->rx_packets +
8602 get_stat64(&hw_stats->rx_ucast_packets) +
8603 get_stat64(&hw_stats->rx_mcast_packets) +
8604 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008605
Linus Torvalds1da177e2005-04-16 15:20:36 -07008606 stats->tx_packets = old_stats->tx_packets +
8607 get_stat64(&hw_stats->tx_ucast_packets) +
8608 get_stat64(&hw_stats->tx_mcast_packets) +
8609 get_stat64(&hw_stats->tx_bcast_packets);
8610
8611 stats->rx_bytes = old_stats->rx_bytes +
8612 get_stat64(&hw_stats->rx_octets);
8613 stats->tx_bytes = old_stats->tx_bytes +
8614 get_stat64(&hw_stats->tx_octets);
8615
8616 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008617 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008618 stats->tx_errors = old_stats->tx_errors +
8619 get_stat64(&hw_stats->tx_errors) +
8620 get_stat64(&hw_stats->tx_mac_errors) +
8621 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8622 get_stat64(&hw_stats->tx_discards);
8623
8624 stats->multicast = old_stats->multicast +
8625 get_stat64(&hw_stats->rx_mcast_packets);
8626 stats->collisions = old_stats->collisions +
8627 get_stat64(&hw_stats->tx_collisions);
8628
8629 stats->rx_length_errors = old_stats->rx_length_errors +
8630 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8631 get_stat64(&hw_stats->rx_undersize_packets);
8632
8633 stats->rx_over_errors = old_stats->rx_over_errors +
8634 get_stat64(&hw_stats->rxbds_empty);
8635 stats->rx_frame_errors = old_stats->rx_frame_errors +
8636 get_stat64(&hw_stats->rx_align_errors);
8637 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8638 get_stat64(&hw_stats->tx_discards);
8639 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8640 get_stat64(&hw_stats->tx_carrier_sense_errors);
8641
8642 stats->rx_crc_errors = old_stats->rx_crc_errors +
8643 calc_crc_errors(tp);
8644
John W. Linville4f63b872005-09-12 14:43:18 -07008645 stats->rx_missed_errors = old_stats->rx_missed_errors +
8646 get_stat64(&hw_stats->rx_discards);
8647
Linus Torvalds1da177e2005-04-16 15:20:36 -07008648 return stats;
8649}
8650
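/* Bit-by-bit, LSB-first CRC-32 with the reflected polynomial 0xedb88320 and
 * a final inversion -- the same CRC used for the Ethernet FCS (the usual
 * check value applies: calc_crc("123456789", 9) == 0xcbf43926).  It is used
 * below for the multicast hash filter and for the NVRAM checksums.
 */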
8651static inline u32 calc_crc(unsigned char *buf, int len)
8652{
8653 u32 reg;
8654 u32 tmp;
8655 int j, k;
8656
8657 reg = 0xffffffff;
8658
8659 for (j = 0; j < len; j++) {
8660 reg ^= buf[j];
8661
8662 for (k = 0; k < 8; k++) {
8663 tmp = reg & 0x01;
8664
8665 reg >>= 1;
8666
8667 if (tmp) {
8668 reg ^= 0xedb88320;
8669 }
8670 }
8671 }
8672
8673 return ~reg;
8674}
8675
8676static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8677{
8678 /* accept or reject all multicast frames */
8679 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8680 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8681 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8682 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8683}
8684
8685static void __tg3_set_rx_mode(struct net_device *dev)
8686{
8687 struct tg3 *tp = netdev_priv(dev);
8688 u32 rx_mode;
8689
8690 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8691 RX_MODE_KEEP_VLAN_TAG);
8692
8693 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8694 * flag clear.
8695 */
8696#if TG3_VLAN_TAG_USED
8697 if (!tp->vlgrp &&
8698 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8699 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8700#else
8701	/* By definition, VLAN is always disabled in this
8702 * case.
8703 */
8704 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8705 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8706#endif
8707
8708 if (dev->flags & IFF_PROMISC) {
8709 /* Promiscuous mode. */
8710 rx_mode |= RX_MODE_PROMISC;
8711 } else if (dev->flags & IFF_ALLMULTI) {
8712 /* Accept all multicast. */
8713 tg3_set_multi (tp, 1);
8714 } else if (dev->mc_count < 1) {
8715 /* Reject all multicast. */
8716 tg3_set_multi (tp, 0);
8717 } else {
8718 /* Accept one or more multicast(s). */
8719 struct dev_mc_list *mclist;
8720 unsigned int i;
8721 u32 mc_filter[4] = { 0, };
8722 u32 regidx;
8723 u32 bit;
8724 u32 crc;
8725
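		/* The low 7 bits of the bit-inverted CRC select one of the 128
		 * hash-filter bits: bits 6:5 pick one of the four 32-bit
		 * MAC_HASH registers and bits 4:0 the bit within it.  For
		 * example, ~crc & 0x7f == 0x43 sets bit 3 of mc_filter[2].
		 */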
8726 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8727 i++, mclist = mclist->next) {
8728
8729 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8730 bit = ~crc & 0x7f;
8731 regidx = (bit & 0x60) >> 5;
8732 bit &= 0x1f;
8733 mc_filter[regidx] |= (1 << bit);
8734 }
8735
8736 tw32(MAC_HASH_REG_0, mc_filter[0]);
8737 tw32(MAC_HASH_REG_1, mc_filter[1]);
8738 tw32(MAC_HASH_REG_2, mc_filter[2]);
8739 tw32(MAC_HASH_REG_3, mc_filter[3]);
8740 }
8741
8742 if (rx_mode != tp->rx_mode) {
8743 tp->rx_mode = rx_mode;
8744 tw32_f(MAC_RX_MODE, rx_mode);
8745 udelay(10);
8746 }
8747}
8748
8749static void tg3_set_rx_mode(struct net_device *dev)
8750{
8751 struct tg3 *tp = netdev_priv(dev);
8752
Michael Chane75f7c92006-03-20 21:33:26 -08008753 if (!netif_running(dev))
8754 return;
8755
David S. Millerf47c11e2005-06-24 20:18:35 -07008756 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008757 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008758 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008759}
8760
8761#define TG3_REGDUMP_LEN (32 * 1024)
8762
8763static int tg3_get_regs_len(struct net_device *dev)
8764{
8765 return TG3_REGDUMP_LEN;
8766}
8767
8768static void tg3_get_regs(struct net_device *dev,
8769 struct ethtool_regs *regs, void *_p)
8770{
8771 u32 *p = _p;
8772 struct tg3 *tp = netdev_priv(dev);
8773 u8 *orig_p = _p;
8774 int i;
8775
8776 regs->version = 0;
8777
8778 memset(p, 0, TG3_REGDUMP_LEN);
8779
Michael Chanbc1c7562006-03-20 17:48:03 -08008780 if (tp->link_config.phy_is_low_power)
8781 return;
8782
David S. Millerf47c11e2005-06-24 20:18:35 -07008783 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008784
8785#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8786#define GET_REG32_LOOP(base,len) \
8787do { p = (u32 *)(orig_p + (base)); \
8788 for (i = 0; i < len; i += 4) \
8789 __GET_REG32((base) + i); \
8790} while (0)
8791#define GET_REG32_1(reg) \
8792do { p = (u32 *)(orig_p + (reg)); \
8793 __GET_REG32((reg)); \
8794} while (0)
8795
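	/* Each helper repositions p at the register's own offset inside the
	 * zeroed TG3_REGDUMP_LEN snapshot (orig_p + base), so the dump mirrors
	 * the chip's register layout and unread ranges stay zero.
	 */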
8796 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8797 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8798 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8799 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8800 GET_REG32_1(SNDDATAC_MODE);
8801 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8802 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8803 GET_REG32_1(SNDBDC_MODE);
8804 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8805 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8806 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8807 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8808 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8809 GET_REG32_1(RCVDCC_MODE);
8810 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8811 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8812 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8813 GET_REG32_1(MBFREE_MODE);
8814 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8815 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8816 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8817 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8818 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008819 GET_REG32_1(RX_CPU_MODE);
8820 GET_REG32_1(RX_CPU_STATE);
8821 GET_REG32_1(RX_CPU_PGMCTR);
8822 GET_REG32_1(RX_CPU_HWBKPT);
8823 GET_REG32_1(TX_CPU_MODE);
8824 GET_REG32_1(TX_CPU_STATE);
8825 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008826 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8827 GET_REG32_LOOP(FTQ_RESET, 0x120);
8828 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8829 GET_REG32_1(DMAC_MODE);
8830 GET_REG32_LOOP(GRC_MODE, 0x4c);
8831 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8832 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8833
8834#undef __GET_REG32
8835#undef GET_REG32_LOOP
8836#undef GET_REG32_1
8837
David S. Millerf47c11e2005-06-24 20:18:35 -07008838 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008839}
8840
8841static int tg3_get_eeprom_len(struct net_device *dev)
8842{
8843 struct tg3 *tp = netdev_priv(dev);
8844
8845 return tp->nvram_size;
8846}
8847
8848static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008849static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008850static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008851
8852static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8853{
8854 struct tg3 *tp = netdev_priv(dev);
8855 int ret;
8856 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008857 u32 i, offset, len, b_offset, b_count;
8858 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008859
Michael Chanbc1c7562006-03-20 17:48:03 -08008860 if (tp->link_config.phy_is_low_power)
8861 return -EAGAIN;
8862
Linus Torvalds1da177e2005-04-16 15:20:36 -07008863 offset = eeprom->offset;
8864 len = eeprom->len;
8865 eeprom->len = 0;
8866
8867 eeprom->magic = TG3_EEPROM_MAGIC;
8868
8869 if (offset & 3) {
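	/* NVRAM is read in 32-bit words, so an unaligned request is split into
	 * a partial leading word, whole words, and a partial trailing word.
	 * For example, offset=1 len=2 reads the word at offset 0 and copies
	 * bytes 1-2 out of it.
	 */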
8870 /* adjustments to start on required 4 byte boundary */
8871 b_offset = offset & 3;
8872 b_count = 4 - b_offset;
8873 if (b_count > len) {
8874 /* i.e. offset=1 len=2 */
8875 b_count = len;
8876 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008877 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008878 if (ret)
8879 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008880 memcpy(data, ((char*)&val) + b_offset, b_count);
8881 len -= b_count;
8882 offset += b_count;
8883 eeprom->len += b_count;
8884 }
8885
8886	/* read bytes up to the last 4 byte boundary */
8887 pd = &data[eeprom->len];
8888 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008889 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008890 if (ret) {
8891 eeprom->len += i;
8892 return ret;
8893 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008894 memcpy(pd + i, &val, 4);
8895 }
8896 eeprom->len += i;
8897
8898 if (len & 3) {
8899 /* read last bytes not ending on 4 byte boundary */
8900 pd = &data[eeprom->len];
8901 b_count = len & 3;
8902 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008903 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008904 if (ret)
8905 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008906 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008907 eeprom->len += b_count;
8908 }
8909 return 0;
8910}
8911
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008912static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008913
8914static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8915{
8916 struct tg3 *tp = netdev_priv(dev);
8917 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008918 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008919 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08008920 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008921
Michael Chanbc1c7562006-03-20 17:48:03 -08008922 if (tp->link_config.phy_is_low_power)
8923 return -EAGAIN;
8924
Linus Torvalds1da177e2005-04-16 15:20:36 -07008925 if (eeprom->magic != TG3_EEPROM_MAGIC)
8926 return -EINVAL;
8927
8928 offset = eeprom->offset;
8929 len = eeprom->len;
8930
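	/* NVRAM writes must cover whole 4-byte words.  If the request is not
	 * aligned, the bordering words are read back first (start/end below)
	 * and merged with the caller's data into a temporary buffer before
	 * tg3_nvram_write_block() is called.
	 */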
8931 if ((b_offset = (offset & 3))) {
8932 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08008933 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008934 if (ret)
8935 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008936 len += b_offset;
8937 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07008938 if (len < 4)
8939 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008940 }
8941
8942 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07008943 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008944 /* adjustments to end on required 4 byte boundary */
8945 odd_len = 1;
8946 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08008947 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008948 if (ret)
8949 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008950 }
8951
8952 buf = data;
8953 if (b_offset || odd_len) {
8954 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008955 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008956 return -ENOMEM;
8957 if (b_offset)
8958 memcpy(buf, &start, 4);
8959 if (odd_len)
8960 memcpy(buf+len-4, &end, 4);
8961 memcpy(buf + b_offset, data, eeprom->len);
8962 }
8963
8964 ret = tg3_nvram_write_block(tp, offset, len, buf);
8965
8966 if (buf != data)
8967 kfree(buf);
8968
8969 return ret;
8970}
8971
8972static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8973{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008974 struct tg3 *tp = netdev_priv(dev);
8975
8976 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8977 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8978 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07008979 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008980 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008981
Linus Torvalds1da177e2005-04-16 15:20:36 -07008982 cmd->supported = (SUPPORTED_Autoneg);
8983
8984 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8985 cmd->supported |= (SUPPORTED_1000baseT_Half |
8986 SUPPORTED_1000baseT_Full);
8987
Karsten Keilef348142006-05-12 12:49:08 -07008988 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008989 cmd->supported |= (SUPPORTED_100baseT_Half |
8990 SUPPORTED_100baseT_Full |
8991 SUPPORTED_10baseT_Half |
8992 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08008993 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07008994 cmd->port = PORT_TP;
8995 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008996 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07008997 cmd->port = PORT_FIBRE;
8998 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008999
Linus Torvalds1da177e2005-04-16 15:20:36 -07009000 cmd->advertising = tp->link_config.advertising;
9001 if (netif_running(dev)) {
9002 cmd->speed = tp->link_config.active_speed;
9003 cmd->duplex = tp->link_config.active_duplex;
9004 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009005 cmd->phy_address = PHY_ADDR;
9006 cmd->transceiver = 0;
9007 cmd->autoneg = tp->link_config.autoneg;
9008 cmd->maxtxpkt = 0;
9009 cmd->maxrxpkt = 0;
9010 return 0;
9011}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009012
Linus Torvalds1da177e2005-04-16 15:20:36 -07009013static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9014{
9015 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009016
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009017 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9018 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9019 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009020 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009021 }
9022
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009023 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009024 /* These are the only valid advertisement bits allowed. */
9025 if (cmd->autoneg == AUTONEG_ENABLE &&
9026 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9027 ADVERTISED_1000baseT_Full |
9028 ADVERTISED_Autoneg |
9029 ADVERTISED_FIBRE)))
9030 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009031 /* Fiber can only do SPEED_1000. */
9032 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9033 (cmd->speed != SPEED_1000))
9034 return -EINVAL;
9035 /* Copper cannot force SPEED_1000. */
9036 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9037 (cmd->speed == SPEED_1000))
9038 return -EINVAL;
9039 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009040 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009041 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009042
David S. Millerf47c11e2005-06-24 20:18:35 -07009043 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009044
9045 tp->link_config.autoneg = cmd->autoneg;
9046 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009047 tp->link_config.advertising = (cmd->advertising |
9048 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009049 tp->link_config.speed = SPEED_INVALID;
9050 tp->link_config.duplex = DUPLEX_INVALID;
9051 } else {
9052 tp->link_config.advertising = 0;
9053 tp->link_config.speed = cmd->speed;
9054 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009055 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009056
Michael Chan24fcad62006-12-17 17:06:46 -08009057 tp->link_config.orig_speed = tp->link_config.speed;
9058 tp->link_config.orig_duplex = tp->link_config.duplex;
9059 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9060
Linus Torvalds1da177e2005-04-16 15:20:36 -07009061 if (netif_running(dev))
9062 tg3_setup_phy(tp, 1);
9063
David S. Millerf47c11e2005-06-24 20:18:35 -07009064 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009065
Linus Torvalds1da177e2005-04-16 15:20:36 -07009066 return 0;
9067}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009068
Linus Torvalds1da177e2005-04-16 15:20:36 -07009069static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9070{
9071 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009072
Linus Torvalds1da177e2005-04-16 15:20:36 -07009073 strcpy(info->driver, DRV_MODULE_NAME);
9074 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009075 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009076 strcpy(info->bus_info, pci_name(tp->pdev));
9077}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009078
Linus Torvalds1da177e2005-04-16 15:20:36 -07009079static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9080{
9081 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009082
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009083 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9084 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009085 wol->supported = WAKE_MAGIC;
9086 else
9087 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009088 wol->wolopts = 0;
9089 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9090 wol->wolopts = WAKE_MAGIC;
9091 memset(&wol->sopass, 0, sizeof(wol->sopass));
9092}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009093
Linus Torvalds1da177e2005-04-16 15:20:36 -07009094static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9095{
9096 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009097 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009098
Linus Torvalds1da177e2005-04-16 15:20:36 -07009099 if (wol->wolopts & ~WAKE_MAGIC)
9100 return -EINVAL;
9101 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009102 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009103 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009104
David S. Millerf47c11e2005-06-24 20:18:35 -07009105 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009106 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009107 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009108 device_set_wakeup_enable(dp, true);
9109 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009110 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009111 device_set_wakeup_enable(dp, false);
9112 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009113 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009114
Linus Torvalds1da177e2005-04-16 15:20:36 -07009115 return 0;
9116}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009117
Linus Torvalds1da177e2005-04-16 15:20:36 -07009118static u32 tg3_get_msglevel(struct net_device *dev)
9119{
9120 struct tg3 *tp = netdev_priv(dev);
9121 return tp->msg_enable;
9122}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009123
Linus Torvalds1da177e2005-04-16 15:20:36 -07009124static void tg3_set_msglevel(struct net_device *dev, u32 value)
9125{
9126 struct tg3 *tp = netdev_priv(dev);
9127 tp->msg_enable = value;
9128}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009129
Linus Torvalds1da177e2005-04-16 15:20:36 -07009130static int tg3_set_tso(struct net_device *dev, u32 value)
9131{
9132 struct tg3 *tp = netdev_priv(dev);
9133
9134 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9135 if (value)
9136 return -EINVAL;
9137 return 0;
9138 }
Michael Chanb5d37722006-09-27 16:06:21 -07009139 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9140 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009141 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009142 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9144 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9145 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009147 dev->features |= NETIF_F_TSO_ECN;
9148 } else
9149 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009151 return ethtool_op_set_tso(dev, value);
9152}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009153
Linus Torvalds1da177e2005-04-16 15:20:36 -07009154static int tg3_nway_reset(struct net_device *dev)
9155{
9156 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009157 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009158
Linus Torvalds1da177e2005-04-16 15:20:36 -07009159 if (!netif_running(dev))
9160 return -EAGAIN;
9161
Michael Chanc94e3942005-09-27 12:12:42 -07009162 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9163 return -EINVAL;
9164
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009165 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9166 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9167 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009168 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009169 } else {
9170 u32 bmcr;
9171
9172 spin_lock_bh(&tp->lock);
9173 r = -EINVAL;
9174 tg3_readphy(tp, MII_BMCR, &bmcr);
9175 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9176 ((bmcr & BMCR_ANENABLE) ||
9177 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9178 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9179 BMCR_ANENABLE);
9180 r = 0;
9181 }
9182 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009183 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009184
Linus Torvalds1da177e2005-04-16 15:20:36 -07009185 return r;
9186}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009187
Linus Torvalds1da177e2005-04-16 15:20:36 -07009188static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9189{
9190 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009191
Linus Torvalds1da177e2005-04-16 15:20:36 -07009192 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9193 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009194 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9195 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9196 else
9197 ering->rx_jumbo_max_pending = 0;
9198
9199 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009200
9201 ering->rx_pending = tp->rx_pending;
9202 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009203 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9204 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9205 else
9206 ering->rx_jumbo_pending = 0;
9207
Linus Torvalds1da177e2005-04-16 15:20:36 -07009208 ering->tx_pending = tp->tx_pending;
9209}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009210
Linus Torvalds1da177e2005-04-16 15:20:36 -07009211static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9212{
9213 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009214 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009215
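	/* Sanity limits: the requested sizes must fit the fixed descriptor
	 * rings, and the TX ring must be larger than MAX_SKB_FRAGS entries
	 * (three times that on chips using the TSO-bug workaround), presumably
	 * so a maximally fragmented frame can always be queued.
	 */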
Linus Torvalds1da177e2005-04-16 15:20:36 -07009216 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9217 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009218 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9219 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009220 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009221 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009222 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009223
Michael Chanbbe832c2005-06-24 20:20:04 -07009224 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009225 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009226 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009227 irq_sync = 1;
9228 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009229
Michael Chanbbe832c2005-06-24 20:20:04 -07009230 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009231
Linus Torvalds1da177e2005-04-16 15:20:36 -07009232 tp->rx_pending = ering->rx_pending;
9233
9234 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9235 tp->rx_pending > 63)
9236 tp->rx_pending = 63;
9237 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9238 tp->tx_pending = ering->tx_pending;
9239
9240 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009241 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009242 err = tg3_restart_hw(tp, 1);
9243 if (!err)
9244 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009245 }
9246
David S. Millerf47c11e2005-06-24 20:18:35 -07009247 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009248
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009249 if (irq_sync && !err)
9250 tg3_phy_start(tp);
9251
Michael Chanb9ec6c12006-07-25 16:37:27 -07009252 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009253}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009254
Linus Torvalds1da177e2005-04-16 15:20:36 -07009255static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9256{
9257 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009258
Linus Torvalds1da177e2005-04-16 15:20:36 -07009259 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009260
9261 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9262 epause->rx_pause = 1;
9263 else
9264 epause->rx_pause = 0;
9265
9266 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9267 epause->tx_pause = 1;
9268 else
9269 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009270}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009271
Linus Torvalds1da177e2005-04-16 15:20:36 -07009272static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9273{
9274 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009275 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009276
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009277 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9278 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9279 return -EAGAIN;
9280
9281 if (epause->autoneg) {
9282 u32 newadv;
9283 struct phy_device *phydev;
9284
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009285 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009286
9287 if (epause->rx_pause) {
9288 if (epause->tx_pause)
9289 newadv = ADVERTISED_Pause;
9290 else
9291 newadv = ADVERTISED_Pause |
9292 ADVERTISED_Asym_Pause;
9293 } else if (epause->tx_pause) {
9294 newadv = ADVERTISED_Asym_Pause;
9295 } else
9296 newadv = 0;
9297
9298 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9299 u32 oldadv = phydev->advertising &
9300 (ADVERTISED_Pause |
9301 ADVERTISED_Asym_Pause);
9302 if (oldadv != newadv) {
9303 phydev->advertising &=
9304 ~(ADVERTISED_Pause |
9305 ADVERTISED_Asym_Pause);
9306 phydev->advertising |= newadv;
9307 err = phy_start_aneg(phydev);
9308 }
9309 } else {
9310 tp->link_config.advertising &=
9311 ~(ADVERTISED_Pause |
9312 ADVERTISED_Asym_Pause);
9313 tp->link_config.advertising |= newadv;
9314 }
9315 } else {
9316 if (epause->rx_pause)
9317 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9318 else
9319 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9320
9321 if (epause->tx_pause)
9322 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9323 else
9324 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9325
9326 if (netif_running(dev))
9327 tg3_setup_flow_control(tp, 0, 0);
9328 }
9329 } else {
9330 int irq_sync = 0;
9331
9332 if (netif_running(dev)) {
9333 tg3_netif_stop(tp);
9334 irq_sync = 1;
9335 }
9336
9337 tg3_full_lock(tp, irq_sync);
9338
9339 if (epause->autoneg)
9340 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9341 else
9342 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9343 if (epause->rx_pause)
9344 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9345 else
9346 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9347 if (epause->tx_pause)
9348 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9349 else
9350 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9351
9352 if (netif_running(dev)) {
9353 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9354 err = tg3_restart_hw(tp, 1);
9355 if (!err)
9356 tg3_netif_start(tp);
9357 }
9358
9359 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009360 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009361
Michael Chanb9ec6c12006-07-25 16:37:27 -07009362 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009363}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009364
Linus Torvalds1da177e2005-04-16 15:20:36 -07009365static u32 tg3_get_rx_csum(struct net_device *dev)
9366{
9367 struct tg3 *tp = netdev_priv(dev);
9368 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9369}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009370
Linus Torvalds1da177e2005-04-16 15:20:36 -07009371static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9372{
9373 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009374
Linus Torvalds1da177e2005-04-16 15:20:36 -07009375 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9376 if (data != 0)
9377 return -EINVAL;
9378 return 0;
9379 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009380
David S. Millerf47c11e2005-06-24 20:18:35 -07009381 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009382 if (data)
9383 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9384 else
9385 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009386 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009387
Linus Torvalds1da177e2005-04-16 15:20:36 -07009388 return 0;
9389}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009390
Linus Torvalds1da177e2005-04-16 15:20:36 -07009391static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9392{
9393 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009394
Linus Torvalds1da177e2005-04-16 15:20:36 -07009395 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9396 if (data != 0)
9397 return -EINVAL;
9398 return 0;
9399 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009400
Michael Chanaf36e6b2006-03-23 01:28:06 -08009401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009406 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009407 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009408 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009409
9410 return 0;
9411}
9412
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009413static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009414{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009415 switch (sset) {
9416 case ETH_SS_TEST:
9417 return TG3_NUM_TEST;
9418 case ETH_SS_STATS:
9419 return TG3_NUM_STATS;
9420 default:
9421 return -EOPNOTSUPP;
9422 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009423}
9424
Linus Torvalds1da177e2005-04-16 15:20:36 -07009425static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9426{
9427 switch (stringset) {
9428 case ETH_SS_STATS:
9429 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9430 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009431 case ETH_SS_TEST:
9432 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9433 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009434 default:
9435 WARN_ON(1); /* we need a WARN() */
9436 break;
9437 }
9438}
9439
Michael Chan4009a932005-09-05 17:52:54 -07009440static int tg3_phys_id(struct net_device *dev, u32 data)
9441{
9442 struct tg3 *tp = netdev_priv(dev);
9443 int i;
9444
9445 if (!netif_running(tp->dev))
9446 return -EAGAIN;
9447
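	/* ethtool identify: 'data' is the blink time in seconds.  Each
	 * half-second pass below alternates between overriding all link and
	 * traffic LEDs on and overriding them off; data == 0 selects an
	 * effectively unbounded blink (UINT_MAX / 2 seconds) that runs until
	 * msleep_interruptible() is interrupted.
	 */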
9448 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009449 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009450
9451 for (i = 0; i < (data * 2); i++) {
9452 if ((i % 2) == 0)
9453 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9454 LED_CTRL_1000MBPS_ON |
9455 LED_CTRL_100MBPS_ON |
9456 LED_CTRL_10MBPS_ON |
9457 LED_CTRL_TRAFFIC_OVERRIDE |
9458 LED_CTRL_TRAFFIC_BLINK |
9459 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009460
Michael Chan4009a932005-09-05 17:52:54 -07009461 else
9462 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9463 LED_CTRL_TRAFFIC_OVERRIDE);
9464
9465 if (msleep_interruptible(500))
9466 break;
9467 }
9468 tw32(MAC_LED_CTRL, tp->led_ctrl);
9469 return 0;
9470}
9471
Linus Torvalds1da177e2005-04-16 15:20:36 -07009472static void tg3_get_ethtool_stats (struct net_device *dev,
9473 struct ethtool_stats *estats, u64 *tmp_stats)
9474{
9475 struct tg3 *tp = netdev_priv(dev);
9476 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9477}
9478
Michael Chan566f86a2005-05-29 14:56:58 -07009479#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009480#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9481#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9482#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009483#define NVRAM_SELFBOOT_HW_SIZE 0x20
9484#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009485
9486static int tg3_test_nvram(struct tg3 *tp)
9487{
Al Virob9fc7dc2007-12-17 22:59:57 -08009488 u32 csum, magic;
9489 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009490 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009491
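	/* The magic word at offset 0 identifies the image format and thus how
	 * many bytes the test must read: legacy images use NVRAM_TEST_SIZE,
	 * self-boot format 1 images a revision-dependent size, and hardware
	 * self-boot images NVRAM_SELFBOOT_HW_SIZE.  Unknown self-boot
	 * revisions are not checked at all.
	 */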
Michael Chan18201802006-03-20 22:29:15 -08009492 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009493 return -EIO;
9494
Michael Chan1b277772006-03-20 22:27:48 -08009495 if (magic == TG3_EEPROM_MAGIC)
9496 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009497 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009498 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9499 TG3_EEPROM_SB_FORMAT_1) {
9500 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9501 case TG3_EEPROM_SB_REVISION_0:
9502 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9503 break;
9504 case TG3_EEPROM_SB_REVISION_2:
9505 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9506 break;
9507 case TG3_EEPROM_SB_REVISION_3:
9508 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9509 break;
9510 default:
9511 return 0;
9512 }
9513 } else
Michael Chan1b277772006-03-20 22:27:48 -08009514 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009515 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9516 size = NVRAM_SELFBOOT_HW_SIZE;
9517 else
Michael Chan1b277772006-03-20 22:27:48 -08009518 return -EIO;
9519
9520 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009521 if (buf == NULL)
9522 return -ENOMEM;
9523
Michael Chan1b277772006-03-20 22:27:48 -08009524 err = -EIO;
9525 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009526 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009527 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009528 }
Michael Chan1b277772006-03-20 22:27:48 -08009529 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009530 goto out;
9531
Michael Chan1b277772006-03-20 22:27:48 -08009532 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009533 magic = swab32(le32_to_cpu(buf[0]));
9534 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009535 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009536 u8 *buf8 = (u8 *) buf, csum8 = 0;
9537
Al Virob9fc7dc2007-12-17 22:59:57 -08009538 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009539 TG3_EEPROM_SB_REVISION_2) {
9540 /* For rev 2, the csum doesn't include the MBA. */
9541 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9542 csum8 += buf8[i];
9543 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9544 csum8 += buf8[i];
9545 } else {
9546 for (i = 0; i < size; i++)
9547 csum8 += buf8[i];
9548 }
Michael Chan1b277772006-03-20 22:27:48 -08009549
Adrian Bunkad96b482006-04-05 22:21:04 -07009550 if (csum8 == 0) {
9551 err = 0;
9552 goto out;
9553 }
9554
9555 err = -EIO;
9556 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009557 }
Michael Chan566f86a2005-05-29 14:56:58 -07009558
Al Virob9fc7dc2007-12-17 22:59:57 -08009559 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009560 TG3_EEPROM_MAGIC_HW) {
9561 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9562 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9563 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009564
9565 /* Separate the parity bits and the data bytes. */
9566 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9567 if ((i == 0) || (i == 8)) {
9568 int l;
9569 u8 msk;
9570
9571 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9572 parity[k++] = buf8[i] & msk;
9573 i++;
9574 }
9575 else if (i == 16) {
9576 int l;
9577 u8 msk;
9578
9579 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9580 parity[k++] = buf8[i] & msk;
9581 i++;
9582
9583 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9584 parity[k++] = buf8[i] & msk;
9585 i++;
9586 }
9587 data[j++] = buf8[i];
9588 }
9589
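		/* Each data byte carries a parity bit (packed into bytes 0, 8
		 * and 16 above).  The check below enforces odd parity: the
		 * parity bit must be set exactly when the data byte has an
		 * even number of 1 bits.
		 */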
9590 err = -EIO;
9591 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9592 u8 hw8 = hweight8(data[i]);
9593
9594 if ((hw8 & 0x1) && parity[i])
9595 goto out;
9596 else if (!(hw8 & 0x1) && !parity[i])
9597 goto out;
9598 }
9599 err = 0;
9600 goto out;
9601 }
9602
Michael Chan566f86a2005-05-29 14:56:58 -07009603 /* Bootstrap checksum at offset 0x10 */
9604 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009605	if (csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009606 goto out;
9607
9608 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9609 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009610 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009611 goto out;
9612
9613 err = 0;
9614
9615out:
9616 kfree(buf);
9617 return err;
9618}
9619
Michael Chanca430072005-05-29 14:57:23 -07009620#define TG3_SERDES_TIMEOUT_SEC 2
9621#define TG3_COPPER_TIMEOUT_SEC 6
9622
9623static int tg3_test_link(struct tg3 *tp)
9624{
9625 int i, max;
9626
9627 if (!netif_running(tp->dev))
9628 return -ENODEV;
9629
Michael Chan4c987482005-09-05 17:52:38 -07009630 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009631 max = TG3_SERDES_TIMEOUT_SEC;
9632 else
9633 max = TG3_COPPER_TIMEOUT_SEC;
9634
9635 for (i = 0; i < max; i++) {
9636 if (netif_carrier_ok(tp->dev))
9637 return 0;
9638
9639 if (msleep_interruptible(1000))
9640 break;
9641 }
9642
9643 return -EIO;
9644}
9645
Michael Chana71116d2005-05-29 14:58:11 -07009646/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009647static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009648{
Michael Chanb16250e2006-09-27 16:10:14 -07009649 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009650 u32 offset, read_mask, write_mask, val, save_val, read_val;
9651 static struct {
9652 u16 offset;
9653 u16 flags;
9654#define TG3_FL_5705 0x1
9655#define TG3_FL_NOT_5705 0x2
9656#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009657#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009658 u32 read_mask;
9659 u32 write_mask;
9660 } reg_tbl[] = {
9661 /* MAC Control Registers */
9662 { MAC_MODE, TG3_FL_NOT_5705,
9663 0x00000000, 0x00ef6f8c },
9664 { MAC_MODE, TG3_FL_5705,
9665 0x00000000, 0x01ef6b8c },
9666 { MAC_STATUS, TG3_FL_NOT_5705,
9667 0x03800107, 0x00000000 },
9668 { MAC_STATUS, TG3_FL_5705,
9669 0x03800100, 0x00000000 },
9670 { MAC_ADDR_0_HIGH, 0x0000,
9671 0x00000000, 0x0000ffff },
9672 { MAC_ADDR_0_LOW, 0x0000,
9673 0x00000000, 0xffffffff },
9674 { MAC_RX_MTU_SIZE, 0x0000,
9675 0x00000000, 0x0000ffff },
9676 { MAC_TX_MODE, 0x0000,
9677 0x00000000, 0x00000070 },
9678 { MAC_TX_LENGTHS, 0x0000,
9679 0x00000000, 0x00003fff },
9680 { MAC_RX_MODE, TG3_FL_NOT_5705,
9681 0x00000000, 0x000007fc },
9682 { MAC_RX_MODE, TG3_FL_5705,
9683 0x00000000, 0x000007dc },
9684 { MAC_HASH_REG_0, 0x0000,
9685 0x00000000, 0xffffffff },
9686 { MAC_HASH_REG_1, 0x0000,
9687 0x00000000, 0xffffffff },
9688 { MAC_HASH_REG_2, 0x0000,
9689 0x00000000, 0xffffffff },
9690 { MAC_HASH_REG_3, 0x0000,
9691 0x00000000, 0xffffffff },
9692
9693 /* Receive Data and Receive BD Initiator Control Registers. */
9694 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9695 0x00000000, 0xffffffff },
9696 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9697 0x00000000, 0xffffffff },
9698 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9699 0x00000000, 0x00000003 },
9700 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9701 0x00000000, 0xffffffff },
9702 { RCVDBDI_STD_BD+0, 0x0000,
9703 0x00000000, 0xffffffff },
9704 { RCVDBDI_STD_BD+4, 0x0000,
9705 0x00000000, 0xffffffff },
9706 { RCVDBDI_STD_BD+8, 0x0000,
9707 0x00000000, 0xffff0002 },
9708 { RCVDBDI_STD_BD+0xc, 0x0000,
9709 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009710
Michael Chana71116d2005-05-29 14:58:11 -07009711 /* Receive BD Initiator Control Registers. */
9712 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9713 0x00000000, 0xffffffff },
9714 { RCVBDI_STD_THRESH, TG3_FL_5705,
9715 0x00000000, 0x000003ff },
9716 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9717 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009718
Michael Chana71116d2005-05-29 14:58:11 -07009719 /* Host Coalescing Control Registers. */
9720 { HOSTCC_MODE, TG3_FL_NOT_5705,
9721 0x00000000, 0x00000004 },
9722 { HOSTCC_MODE, TG3_FL_5705,
9723 0x00000000, 0x000000f6 },
9724 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9725 0x00000000, 0xffffffff },
9726 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9727 0x00000000, 0x000003ff },
9728 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9729 0x00000000, 0xffffffff },
9730 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9731 0x00000000, 0x000003ff },
9732 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9733 0x00000000, 0xffffffff },
9734 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9735 0x00000000, 0x000000ff },
9736 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9737 0x00000000, 0xffffffff },
9738 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9739 0x00000000, 0x000000ff },
9740 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9741 0x00000000, 0xffffffff },
9742 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9743 0x00000000, 0xffffffff },
9744 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9745 0x00000000, 0xffffffff },
9746 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9747 0x00000000, 0x000000ff },
9748 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9749 0x00000000, 0xffffffff },
9750 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9751 0x00000000, 0x000000ff },
9752 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9753 0x00000000, 0xffffffff },
9754 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9755 0x00000000, 0xffffffff },
9756 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9757 0x00000000, 0xffffffff },
9758 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9759 0x00000000, 0xffffffff },
9760 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9761 0x00000000, 0xffffffff },
9762 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9763 0xffffffff, 0x00000000 },
9764 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9765 0xffffffff, 0x00000000 },
9766
9767 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009768 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009769 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009770 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009771 0x00000000, 0x007fffff },
9772 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9773 0x00000000, 0x0000003f },
9774 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9775 0x00000000, 0x000001ff },
9776 { BUFMGR_MB_HIGH_WATER, 0x0000,
9777 0x00000000, 0x000001ff },
9778 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9779 0xffffffff, 0x00000000 },
9780 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9781 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009782
Michael Chana71116d2005-05-29 14:58:11 -07009783 /* Mailbox Registers */
9784 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9785 0x00000000, 0x000001ff },
9786 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9787 0x00000000, 0x000001ff },
9788 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9789 0x00000000, 0x000007ff },
9790 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9791 0x00000000, 0x000001ff },
9792
9793 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9794 };
9795
Michael Chanb16250e2006-09-27 16:10:14 -07009796 is_5705 = is_5750 = 0;
9797 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009798 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009799 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9800 is_5750 = 1;
9801 }
Michael Chana71116d2005-05-29 14:58:11 -07009802
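	/* Each reg_tbl entry names the chip families it applies to (TG3_FL_*
	 * flags), the bits that must read back unchanged (read_mask) and the
	 * bits that must be writable (write_mask); an offset of 0xffff
	 * terminates the table.
	 */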
9803 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9804 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9805 continue;
9806
9807 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9808 continue;
9809
9810 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9811 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9812 continue;
9813
Michael Chanb16250e2006-09-27 16:10:14 -07009814 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9815 continue;
9816
Michael Chana71116d2005-05-29 14:58:11 -07009817 offset = (u32) reg_tbl[i].offset;
9818 read_mask = reg_tbl[i].read_mask;
9819 write_mask = reg_tbl[i].write_mask;
9820
9821 /* Save the original register content */
9822 save_val = tr32(offset);
9823
9824 /* Determine the read-only value. */
9825 read_val = save_val & read_mask;
9826
9827 /* Write zero to the register, then make sure the read-only bits
9828 * are not changed and the read/write bits are all zeros.
9829 */
9830 tw32(offset, 0);
9831
9832 val = tr32(offset);
9833
9834 /* Test the read-only and read/write bits. */
9835 if (((val & read_mask) != read_val) || (val & write_mask))
9836 goto out;
9837
9838 /* Write ones to all the bits defined by RdMask and WrMask, then
9839 * make sure the read-only bits are not changed and the
9840 * read/write bits are all ones.
9841 */
9842 tw32(offset, read_mask | write_mask);
9843
9844 val = tr32(offset);
9845
9846 /* Test the read-only bits. */
9847 if ((val & read_mask) != read_val)
9848 goto out;
9849
9850 /* Test the read/write bits. */
9851 if ((val & write_mask) != write_mask)
9852 goto out;
9853
9854 tw32(offset, save_val);
9855 }
9856
9857 return 0;
9858
9859out:
Michael Chan9f88f292006-12-07 00:22:54 -08009860 if (netif_msg_hw(tp))
9861 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9862 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009863 tw32(offset, save_val);
9864 return -EIO;
9865}
9866
Michael Chan7942e1d2005-05-29 14:58:36 -07009867static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9868{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009869 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009870 int i;
9871 u32 j;
9872
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009873 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009874 for (j = 0; j < len; j += 4) {
9875 u32 val;
9876
9877 tg3_write_mem(tp, offset + j, test_pattern[i]);
9878 tg3_read_mem(tp, offset + j, &val);
9879 if (val != test_pattern[i])
9880 return -EIO;
9881 }
9882 }
9883 return 0;
9884}
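/* tg3_do_mem_test() writes each of the three patterns above (all zeros,
 * all ones, 0xaa55a55a) across the given window in 4-byte steps and
 * reads every word back through tg3_read_mem(); any miscompare fails
 * the test with -EIO.
 */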
9885
9886static int tg3_test_memory(struct tg3 *tp)
9887{
9888 static struct mem_entry {
9889 u32 offset;
9890 u32 len;
9891 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009892 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009893 { 0x00002000, 0x1c000},
9894 { 0xffffffff, 0x00000}
9895 }, mem_tbl_5705[] = {
9896 { 0x00000100, 0x0000c},
9897 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009898 { 0x00004000, 0x00800},
9899 { 0x00006000, 0x01000},
9900 { 0x00008000, 0x02000},
9901 { 0x00010000, 0x0e000},
9902 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009903 }, mem_tbl_5755[] = {
9904 { 0x00000200, 0x00008},
9905 { 0x00004000, 0x00800},
9906 { 0x00006000, 0x00800},
9907 { 0x00008000, 0x02000},
9908 { 0x00010000, 0x0c000},
9909 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009910 }, mem_tbl_5906[] = {
9911 { 0x00000200, 0x00008},
9912 { 0x00004000, 0x00400},
9913 { 0x00006000, 0x00400},
9914 { 0x00008000, 0x01000},
9915 { 0x00010000, 0x01000},
9916 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009917 };
9918 struct mem_entry *mem_tbl;
9919 int err = 0;
9920 int i;
9921
Michael Chan79f4d132006-03-20 22:28:57 -08009922 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08009923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -08009928 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -07009929 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9930 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -08009931 else
9932 mem_tbl = mem_tbl_5705;
9933 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07009934 mem_tbl = mem_tbl_570x;
9935
9936 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9937 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9938 mem_tbl[i].len)) != 0)
9939 break;
9940 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009941
Michael Chan7942e1d2005-05-29 14:58:36 -07009942 return err;
9943}
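/* The mem_tbl_* arrays above list the internal SRAM windows (offset,
 * length) that can safely be pattern-tested once the chip has been
 * halted, with 0xffffffff as the end-of-table sentinel.
 * tg3_test_memory() picks the table matching the ASIC revision and runs
 * tg3_do_mem_test() over every window, stopping at the first failure.
 */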
9944
Michael Chan9f40dea2005-09-05 17:53:06 -07009945#define TG3_MAC_LOOPBACK 0
9946#define TG3_PHY_LOOPBACK 1
9947
9948static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07009949{
Michael Chan9f40dea2005-09-05 17:53:06 -07009950 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07009951 u32 desc_idx;
9952 struct sk_buff *skb, *rx_skb;
9953 u8 *tx_data;
9954 dma_addr_t map;
9955 int num_pkts, tx_len, rx_len, i, err;
9956 struct tg3_rx_buffer_desc *desc;
9957
Michael Chan9f40dea2005-09-05 17:53:06 -07009958 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07009959 /* HW errata - mac loopback fails in some cases on 5780.
9960		 * Normal traffic and PHY loopback are not affected by
9961		 * the errata.
9962 */
9963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9964 return 0;
9965
Michael Chan9f40dea2005-09-05 17:53:06 -07009966 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009967 MAC_MODE_PORT_INT_LPBACK;
9968 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9969 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -07009970 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9971 mac_mode |= MAC_MODE_PORT_MODE_MII;
9972 else
9973 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -07009974 tw32(MAC_MODE, mac_mode);
9975 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -07009976 u32 val;
9977
Michael Chanb16250e2006-09-27 16:10:14 -07009978 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9979 u32 phytest;
9980
9981 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9982 u32 phy;
9983
9984 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9985 phytest | MII_TG3_EPHY_SHADOW_EN);
9986 if (!tg3_readphy(tp, 0x1b, &phy))
9987 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -07009988 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9989 }
Michael Chan5d64ad32006-12-07 00:19:40 -08009990 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9991 } else
9992 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -07009993
Matt Carlson9ef8ca92007-07-11 19:48:29 -07009994 tg3_phy_toggle_automdix(tp, 0);
9995
Michael Chan3f7045c2006-09-27 16:02:29 -07009996 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -07009997 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -08009998
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009999 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -080010000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010001 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010002 mac_mode |= MAC_MODE_PORT_MODE_MII;
10003 } else
10004 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010005
Michael Chanc94e3942005-09-27 12:12:42 -070010006 /* reset to prevent losing 1st rx packet intermittently */
10007 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10008 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10009 udelay(10);
10010 tw32_f(MAC_RX_MODE, tp->rx_mode);
10011 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10013 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10014 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10015 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10016 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010017 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10018 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10019 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010020 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010021 }
10022 else
10023 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010024
10025 err = -EIO;
10026
Michael Chanc76949a2005-05-29 14:58:59 -070010027 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010028 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010029 if (!skb)
10030 return -ENOMEM;
10031
Michael Chanc76949a2005-05-29 14:58:59 -070010032 tx_data = skb_put(skb, tx_len);
10033 memcpy(tx_data, tp->dev->dev_addr, 6);
10034 memset(tx_data + 6, 0x0, 8);
10035
10036 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10037
10038 for (i = 14; i < tx_len; i++)
10039 tx_data[i] = (u8) (i & 0xff);
10040
10041 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10042
10043 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10044 HOSTCC_MODE_NOW);
10045
10046 udelay(10);
10047
10048 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10049
Michael Chanc76949a2005-05-29 14:58:59 -070010050 num_pkts = 0;
10051
Michael Chan9f40dea2005-09-05 17:53:06 -070010052 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010053
Michael Chan9f40dea2005-09-05 17:53:06 -070010054 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010055 num_pkts++;
10056
Michael Chan9f40dea2005-09-05 17:53:06 -070010057 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10058 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010059 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010060
10061 udelay(10);
10062
Michael Chan3f7045c2006-09-27 16:02:29 -070010063 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10064 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010065 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10066 HOSTCC_MODE_NOW);
10067
10068 udelay(10);
10069
10070 tx_idx = tp->hw_status->idx[0].tx_consumer;
10071 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010072 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010073 (rx_idx == (rx_start_idx + num_pkts)))
10074 break;
10075 }
10076
10077 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10078 dev_kfree_skb(skb);
10079
Michael Chan9f40dea2005-09-05 17:53:06 -070010080 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010081 goto out;
10082
10083 if (rx_idx != rx_start_idx + num_pkts)
10084 goto out;
10085
10086 desc = &tp->rx_rcb[rx_start_idx];
10087 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10088 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10089 if (opaque_key != RXD_OPAQUE_RING_STD)
10090 goto out;
10091
10092 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10093 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10094 goto out;
10095
10096 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10097 if (rx_len != tx_len)
10098 goto out;
10099
10100 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10101
10102 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10103 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10104
10105 for (i = 14; i < tx_len; i++) {
10106 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10107 goto out;
10108 }
10109 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010110
Michael Chanc76949a2005-05-29 14:58:59 -070010111 /* tg3_free_rings will unmap and free the rx_skb */
10112out:
10113 return err;
10114}
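/* tg3_run_loopback() flow, as implemented above: the MAC (or PHY) is
 * put into internal loopback, a 1514-byte frame addressed to our own
 * MAC address with an incrementing byte payload is DMA-mapped and
 * posted to send ring 0, and the host coalescing block is forced to
 * update the status block immediately.  The routine then polls the tx
 * consumer / rx producer indices for up to ~250 usec, checks that
 * exactly one standard-ring RX descriptor arrived with the expected
 * length, and byte-compares the received payload against what was
 * transmitted.
 */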
10115
Michael Chan9f40dea2005-09-05 17:53:06 -070010116#define TG3_MAC_LOOPBACK_FAILED 1
10117#define TG3_PHY_LOOPBACK_FAILED 2
10118#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10119 TG3_PHY_LOOPBACK_FAILED)
10120
10121static int tg3_test_loopback(struct tg3 *tp)
10122{
10123 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010124 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010125
10126 if (!netif_running(tp->dev))
10127 return TG3_LOOPBACK_FAILED;
10128
Michael Chanb9ec6c12006-07-25 16:37:27 -070010129 err = tg3_reset_hw(tp, 1);
10130 if (err)
10131 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010132
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010136 int i;
10137 u32 status;
10138
10139 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10140
10141 /* Wait for up to 40 microseconds to acquire lock. */
10142 for (i = 0; i < 4; i++) {
10143 status = tr32(TG3_CPMU_MUTEX_GNT);
10144 if (status == CPMU_MUTEX_GNT_DRIVER)
10145 break;
10146 udelay(10);
10147 }
10148
10149 if (status != CPMU_MUTEX_GNT_DRIVER)
10150 return TG3_LOOPBACK_FAILED;
10151
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010152 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010153 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010154 tw32(TG3_CPMU_CTRL,
10155 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10156 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010157 }
10158
Michael Chan9f40dea2005-09-05 17:53:06 -070010159 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10160 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010161
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10164 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010165 tw32(TG3_CPMU_CTRL, cpmuctrl);
10166
10167 /* Release the mutex */
10168 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10169 }
10170
Matt Carlsondd477002008-05-25 23:45:58 -070010171 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10172 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010173 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10174 err |= TG3_PHY_LOOPBACK_FAILED;
10175 }
10176
10177 return err;
10178}
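/* On 5784/5761/5785, tg3_test_loopback() first acquires the CPMU mutex
 * (waiting up to 40 usec for the grant) and turns off link-based power
 * management (CPMU_CTRL_LINK_SPEED_MODE / CPMU_CTRL_LINK_AWARE_MODE)
 * for the duration of the MAC loopback run, restoring CPMU_CTRL and
 * releasing the mutex afterwards.  PHY loopback is only attempted for
 * copper devices that are not managed through phylib.
 */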
10179
Michael Chan4cafd3f2005-05-29 14:56:34 -070010180static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10181 u64 *data)
10182{
Michael Chan566f86a2005-05-29 14:56:58 -070010183 struct tg3 *tp = netdev_priv(dev);
10184
Michael Chanbc1c7562006-03-20 17:48:03 -080010185 if (tp->link_config.phy_is_low_power)
10186 tg3_set_power_state(tp, PCI_D0);
10187
Michael Chan566f86a2005-05-29 14:56:58 -070010188 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10189
10190 if (tg3_test_nvram(tp) != 0) {
10191 etest->flags |= ETH_TEST_FL_FAILED;
10192 data[0] = 1;
10193 }
Michael Chanca430072005-05-29 14:57:23 -070010194 if (tg3_test_link(tp) != 0) {
10195 etest->flags |= ETH_TEST_FL_FAILED;
10196 data[1] = 1;
10197 }
Michael Chana71116d2005-05-29 14:58:11 -070010198 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010199 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010200
Michael Chanbbe832c2005-06-24 20:20:04 -070010201 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010202 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010203 tg3_netif_stop(tp);
10204 irq_sync = 1;
10205 }
10206
10207 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010208
10209 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010210 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010211 tg3_halt_cpu(tp, RX_CPU_BASE);
10212 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10213 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010214 if (!err)
10215 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010216
Michael Chand9ab5ad2006-03-20 22:27:35 -080010217 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10218 tg3_phy_reset(tp);
10219
Michael Chana71116d2005-05-29 14:58:11 -070010220 if (tg3_test_registers(tp) != 0) {
10221 etest->flags |= ETH_TEST_FL_FAILED;
10222 data[2] = 1;
10223 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010224 if (tg3_test_memory(tp) != 0) {
10225 etest->flags |= ETH_TEST_FL_FAILED;
10226 data[3] = 1;
10227 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010228 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010229 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010230
David S. Millerf47c11e2005-06-24 20:18:35 -070010231 tg3_full_unlock(tp);
10232
Michael Chand4bc3922005-05-29 14:59:20 -070010233 if (tg3_test_interrupt(tp) != 0) {
10234 etest->flags |= ETH_TEST_FL_FAILED;
10235 data[5] = 1;
10236 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010237
10238 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010239
Michael Chana71116d2005-05-29 14:58:11 -070010240 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10241 if (netif_running(dev)) {
10242 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010243 err2 = tg3_restart_hw(tp, 1);
10244 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010245 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010246 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010247
10248 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010249
10250 if (irq_sync && !err2)
10251 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010252 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010253 if (tp->link_config.phy_is_low_power)
10254 tg3_set_power_state(tp, PCI_D3hot);
10255
Michael Chan4cafd3f2005-05-29 14:56:34 -070010256}
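/* ethtool self-test result layout used above (TG3_NUM_TEST entries):
 *   data[0] nvram    data[1] link      data[2] registers
 *   data[3] memory   data[4] loopback  data[5] interrupt
 * The register, memory, loopback and interrupt tests only run for an
 * offline test (ETH_TEST_FL_OFFLINE): the chip is halted, tested, and
 * then re-initialized with tg3_restart_hw() if the interface was
 * running.  From user space this is typically triggered with something
 * like "ethtool -t eth0 offline".
 */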
10257
Linus Torvalds1da177e2005-04-16 15:20:36 -070010258static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10259{
10260 struct mii_ioctl_data *data = if_mii(ifr);
10261 struct tg3 *tp = netdev_priv(dev);
10262 int err;
10263
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010264 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10265 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10266 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010267 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010268 }
10269
Linus Torvalds1da177e2005-04-16 15:20:36 -070010270 switch(cmd) {
10271 case SIOCGMIIPHY:
10272 data->phy_id = PHY_ADDR;
10273
10274 /* fallthru */
10275 case SIOCGMIIREG: {
10276 u32 mii_regval;
10277
10278 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10279 break; /* We have no PHY */
10280
Michael Chanbc1c7562006-03-20 17:48:03 -080010281 if (tp->link_config.phy_is_low_power)
10282 return -EAGAIN;
10283
David S. Millerf47c11e2005-06-24 20:18:35 -070010284 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010285 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010286 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010287
10288 data->val_out = mii_regval;
10289
10290 return err;
10291 }
10292
10293 case SIOCSMIIREG:
10294 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10295 break; /* We have no PHY */
10296
10297 if (!capable(CAP_NET_ADMIN))
10298 return -EPERM;
10299
Michael Chanbc1c7562006-03-20 17:48:03 -080010300 if (tp->link_config.phy_is_low_power)
10301 return -EAGAIN;
10302
David S. Millerf47c11e2005-06-24 20:18:35 -070010303 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010304 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010305 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010306
10307 return err;
10308
10309 default:
10310 /* do nothing */
10311 break;
10312 }
10313 return -EOPNOTSUPP;
10314}
10315
10316#if TG3_VLAN_TAG_USED
10317static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10318{
10319 struct tg3 *tp = netdev_priv(dev);
10320
Michael Chan29315e82006-06-29 20:12:30 -070010321 if (netif_running(dev))
10322 tg3_netif_stop(tp);
10323
David S. Millerf47c11e2005-06-24 20:18:35 -070010324 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010325
10326 tp->vlgrp = grp;
10327
10328 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10329 __tg3_set_rx_mode(dev);
10330
Michael Chan29315e82006-06-29 20:12:30 -070010331 if (netif_running(dev))
10332 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010333
10334 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010335}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010336#endif
10337
David S. Miller15f98502005-05-18 22:49:26 -070010338static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10339{
10340 struct tg3 *tp = netdev_priv(dev);
10341
10342 memcpy(ec, &tp->coal, sizeof(*ec));
10343 return 0;
10344}
10345
Michael Chand244c892005-07-05 14:42:33 -070010346static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10347{
10348 struct tg3 *tp = netdev_priv(dev);
10349 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10350 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10351
10352 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10353 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10354 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10355 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10356 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10357 }
10358
10359 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10360 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10361 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10362 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10363 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10364 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10365 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10366 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10367 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10368 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10369 return -EINVAL;
10370
10371 /* No rx interrupts will be generated if both are zero */
10372 if ((ec->rx_coalesce_usecs == 0) &&
10373 (ec->rx_max_coalesced_frames == 0))
10374 return -EINVAL;
10375
10376 /* No tx interrupts will be generated if both are zero */
10377 if ((ec->tx_coalesce_usecs == 0) &&
10378 (ec->tx_max_coalesced_frames == 0))
10379 return -EINVAL;
10380
10381 /* Only copy relevant parameters, ignore all others. */
10382 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10383 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10384 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10385 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10386 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10387 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10388 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10389 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10390 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10391
10392 if (netif_running(dev)) {
10393 tg3_full_lock(tp, 0);
10394 __tg3_set_coalesce(tp, &tp->coal);
10395 tg3_full_unlock(tp);
10396 }
10397 return 0;
10398}
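/* tg3_set_coalesce() validates and copies only the ethtool_coalesce
 * fields the hardware supports (usecs, max frames, their *_irq
 * variants, and the statistics block interval on pre-5705 chips);
 * everything else in the structure is ignored.  The usec and max-frames
 * values for a direction may not both be zero, since that would disable
 * interrupts entirely for that direction.  A typical tuning command
 * from user space would look something like
 * "ethtool -C eth0 rx-usecs 100 rx-frames 40" (see ethtool(8)).
 */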
10399
Jeff Garzik7282d492006-09-13 14:30:00 -040010400static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010401 .get_settings = tg3_get_settings,
10402 .set_settings = tg3_set_settings,
10403 .get_drvinfo = tg3_get_drvinfo,
10404 .get_regs_len = tg3_get_regs_len,
10405 .get_regs = tg3_get_regs,
10406 .get_wol = tg3_get_wol,
10407 .set_wol = tg3_set_wol,
10408 .get_msglevel = tg3_get_msglevel,
10409 .set_msglevel = tg3_set_msglevel,
10410 .nway_reset = tg3_nway_reset,
10411 .get_link = ethtool_op_get_link,
10412 .get_eeprom_len = tg3_get_eeprom_len,
10413 .get_eeprom = tg3_get_eeprom,
10414 .set_eeprom = tg3_set_eeprom,
10415 .get_ringparam = tg3_get_ringparam,
10416 .set_ringparam = tg3_set_ringparam,
10417 .get_pauseparam = tg3_get_pauseparam,
10418 .set_pauseparam = tg3_set_pauseparam,
10419 .get_rx_csum = tg3_get_rx_csum,
10420 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010421 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010422 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010423 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010424 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010425 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010426 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010427 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010428 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010429 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010430 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010431};
10432
10433static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10434{
Michael Chan1b277772006-03-20 22:27:48 -080010435 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010436
10437 tp->nvram_size = EEPROM_CHIP_SIZE;
10438
Michael Chan18201802006-03-20 22:29:15 -080010439 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010440 return;
10441
Michael Chanb16250e2006-09-27 16:10:14 -070010442 if ((magic != TG3_EEPROM_MAGIC) &&
10443 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10444 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010445 return;
10446
10447 /*
10448 * Size the chip by reading offsets at increasing powers of two.
10449 * When we encounter our validation signature, we know the addressing
10450 * has wrapped around, and thus have our chip size.
10451 */
Michael Chan1b277772006-03-20 22:27:48 -080010452 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010453
10454 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010455 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010456 return;
10457
Michael Chan18201802006-03-20 22:29:15 -080010458 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010459 break;
10460
10461 cursize <<= 1;
10462 }
10463
10464 tp->nvram_size = cursize;
10465}
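/* Sizing example for the loop above: starting at cursize = 0x10, the
 * probe offset is doubled until the word read back equals the magic
 * signature found at offset 0, i.e. until the address wraps around to
 * the start of the part.  If the signature first reappears at 0x20000,
 * the EEPROM is 128KB.
 */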
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010466
Linus Torvalds1da177e2005-04-16 15:20:36 -070010467static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10468{
10469 u32 val;
10470
Michael Chan18201802006-03-20 22:29:15 -080010471 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010472 return;
10473
10474 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010475 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010476 tg3_get_eeprom_size(tp);
10477 return;
10478 }
10479
Linus Torvalds1da177e2005-04-16 15:20:36 -070010480 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10481 if (val != 0) {
10482 tp->nvram_size = (val >> 16) * 1024;
10483 return;
10484 }
10485 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010486 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010487}
10488
10489static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10490{
10491 u32 nvcfg1;
10492
10493 nvcfg1 = tr32(NVRAM_CFG1);
10494 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10495 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10496 }
10497 else {
10498 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10499 tw32(NVRAM_CFG1, nvcfg1);
10500 }
10501
Michael Chan4c987482005-09-05 17:52:38 -070010502 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010503 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010504 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10505 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10506 tp->nvram_jedecnum = JEDEC_ATMEL;
10507 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10508 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10509 break;
10510 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10511 tp->nvram_jedecnum = JEDEC_ATMEL;
10512 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10513 break;
10514 case FLASH_VENDOR_ATMEL_EEPROM:
10515 tp->nvram_jedecnum = JEDEC_ATMEL;
10516 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10517 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10518 break;
10519 case FLASH_VENDOR_ST:
10520 tp->nvram_jedecnum = JEDEC_ST;
10521 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10522 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10523 break;
10524 case FLASH_VENDOR_SAIFUN:
10525 tp->nvram_jedecnum = JEDEC_SAIFUN;
10526 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10527 break;
10528 case FLASH_VENDOR_SST_SMALL:
10529 case FLASH_VENDOR_SST_LARGE:
10530 tp->nvram_jedecnum = JEDEC_SST;
10531 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10532 break;
10533 }
10534 }
10535 else {
10536 tp->nvram_jedecnum = JEDEC_ATMEL;
10537 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10538 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10539 }
10540}
10541
Michael Chan361b4ac2005-04-21 17:11:21 -070010542static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10543{
10544 u32 nvcfg1;
10545
10546 nvcfg1 = tr32(NVRAM_CFG1);
10547
Michael Chane6af3012005-04-21 17:12:05 -070010548 /* NVRAM protection for TPM */
10549 if (nvcfg1 & (1 << 27))
10550 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10551
Michael Chan361b4ac2005-04-21 17:11:21 -070010552 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10553 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10554 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10555 tp->nvram_jedecnum = JEDEC_ATMEL;
10556 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10557 break;
10558 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10559 tp->nvram_jedecnum = JEDEC_ATMEL;
10560 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10561 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10562 break;
10563 case FLASH_5752VENDOR_ST_M45PE10:
10564 case FLASH_5752VENDOR_ST_M45PE20:
10565 case FLASH_5752VENDOR_ST_M45PE40:
10566 tp->nvram_jedecnum = JEDEC_ST;
10567 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10568 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10569 break;
10570 }
10571
10572 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10573 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10574 case FLASH_5752PAGE_SIZE_256:
10575 tp->nvram_pagesize = 256;
10576 break;
10577 case FLASH_5752PAGE_SIZE_512:
10578 tp->nvram_pagesize = 512;
10579 break;
10580 case FLASH_5752PAGE_SIZE_1K:
10581 tp->nvram_pagesize = 1024;
10582 break;
10583 case FLASH_5752PAGE_SIZE_2K:
10584 tp->nvram_pagesize = 2048;
10585 break;
10586 case FLASH_5752PAGE_SIZE_4K:
10587 tp->nvram_pagesize = 4096;
10588 break;
10589 case FLASH_5752PAGE_SIZE_264:
10590 tp->nvram_pagesize = 264;
10591 break;
10592 }
10593 }
10594 else {
10595 /* For eeprom, set pagesize to maximum eeprom size */
10596 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10597
10598 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10599 tw32(NVRAM_CFG1, nvcfg1);
10600 }
10601}
10602
Michael Chand3c7b882006-03-23 01:28:25 -080010603static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10604{
Matt Carlson989a9d22007-05-05 11:51:05 -070010605 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010606
10607 nvcfg1 = tr32(NVRAM_CFG1);
10608
10609 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010610 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010611 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010612 protect = 1;
10613 }
Michael Chand3c7b882006-03-23 01:28:25 -080010614
Matt Carlson989a9d22007-05-05 11:51:05 -070010615 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10616 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010617 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10618 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10619 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010620 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010621 tp->nvram_jedecnum = JEDEC_ATMEL;
10622 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10623 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10624 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010625 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10626 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010627 tp->nvram_size = (protect ? 0x3e200 :
10628 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010629 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010630 tp->nvram_size = (protect ? 0x1f200 :
10631 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010632 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010633 tp->nvram_size = (protect ? 0x1f200 :
10634 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010635 break;
10636 case FLASH_5752VENDOR_ST_M45PE10:
10637 case FLASH_5752VENDOR_ST_M45PE20:
10638 case FLASH_5752VENDOR_ST_M45PE40:
10639 tp->nvram_jedecnum = JEDEC_ST;
10640 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10641 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10642 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010643 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010644 tp->nvram_size = (protect ?
10645 TG3_NVRAM_SIZE_64KB :
10646 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010647 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010648 tp->nvram_size = (protect ?
10649 TG3_NVRAM_SIZE_64KB :
10650 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010651 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010652 tp->nvram_size = (protect ?
10653 TG3_NVRAM_SIZE_128KB :
10654 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010655 break;
10656 }
10657}
10658
Michael Chan1b277772006-03-20 22:27:48 -080010659static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10660{
10661 u32 nvcfg1;
10662
10663 nvcfg1 = tr32(NVRAM_CFG1);
10664
10665 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10666 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10667 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10668 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10669 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10670 tp->nvram_jedecnum = JEDEC_ATMEL;
10671 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10672 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10673
10674 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10675 tw32(NVRAM_CFG1, nvcfg1);
10676 break;
10677 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10678 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10679 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10680 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10681 tp->nvram_jedecnum = JEDEC_ATMEL;
10682 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10683 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10684 tp->nvram_pagesize = 264;
10685 break;
10686 case FLASH_5752VENDOR_ST_M45PE10:
10687 case FLASH_5752VENDOR_ST_M45PE20:
10688 case FLASH_5752VENDOR_ST_M45PE40:
10689 tp->nvram_jedecnum = JEDEC_ST;
10690 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10691 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10692 tp->nvram_pagesize = 256;
10693 break;
10694 }
10695}
10696
Matt Carlson6b91fa02007-10-10 18:01:09 -070010697static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10698{
10699 u32 nvcfg1, protect = 0;
10700
10701 nvcfg1 = tr32(NVRAM_CFG1);
10702
10703 /* NVRAM protection for TPM */
10704 if (nvcfg1 & (1 << 27)) {
10705 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10706 protect = 1;
10707 }
10708
10709 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10710 switch (nvcfg1) {
10711 case FLASH_5761VENDOR_ATMEL_ADB021D:
10712 case FLASH_5761VENDOR_ATMEL_ADB041D:
10713 case FLASH_5761VENDOR_ATMEL_ADB081D:
10714 case FLASH_5761VENDOR_ATMEL_ADB161D:
10715 case FLASH_5761VENDOR_ATMEL_MDB021D:
10716 case FLASH_5761VENDOR_ATMEL_MDB041D:
10717 case FLASH_5761VENDOR_ATMEL_MDB081D:
10718 case FLASH_5761VENDOR_ATMEL_MDB161D:
10719 tp->nvram_jedecnum = JEDEC_ATMEL;
10720 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10721 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10722 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10723 tp->nvram_pagesize = 256;
10724 break;
10725 case FLASH_5761VENDOR_ST_A_M45PE20:
10726 case FLASH_5761VENDOR_ST_A_M45PE40:
10727 case FLASH_5761VENDOR_ST_A_M45PE80:
10728 case FLASH_5761VENDOR_ST_A_M45PE16:
10729 case FLASH_5761VENDOR_ST_M_M45PE20:
10730 case FLASH_5761VENDOR_ST_M_M45PE40:
10731 case FLASH_5761VENDOR_ST_M_M45PE80:
10732 case FLASH_5761VENDOR_ST_M_M45PE16:
10733 tp->nvram_jedecnum = JEDEC_ST;
10734 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10735 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10736 tp->nvram_pagesize = 256;
10737 break;
10738 }
10739
10740 if (protect) {
10741 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10742 } else {
10743 switch (nvcfg1) {
10744 case FLASH_5761VENDOR_ATMEL_ADB161D:
10745 case FLASH_5761VENDOR_ATMEL_MDB161D:
10746 case FLASH_5761VENDOR_ST_A_M45PE16:
10747 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010748 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010749 break;
10750 case FLASH_5761VENDOR_ATMEL_ADB081D:
10751 case FLASH_5761VENDOR_ATMEL_MDB081D:
10752 case FLASH_5761VENDOR_ST_A_M45PE80:
10753 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010754 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010755 break;
10756 case FLASH_5761VENDOR_ATMEL_ADB041D:
10757 case FLASH_5761VENDOR_ATMEL_MDB041D:
10758 case FLASH_5761VENDOR_ST_A_M45PE40:
10759 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010760 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010761 break;
10762 case FLASH_5761VENDOR_ATMEL_ADB021D:
10763 case FLASH_5761VENDOR_ATMEL_MDB021D:
10764 case FLASH_5761VENDOR_ST_A_M45PE20:
10765 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010766 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010767 break;
10768 }
10769 }
10770}
10771
Michael Chanb5d37722006-09-27 16:06:21 -070010772static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10773{
10774 tp->nvram_jedecnum = JEDEC_ATMEL;
10775 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10776 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10777}
10778
Linus Torvalds1da177e2005-04-16 15:20:36 -070010779/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10780static void __devinit tg3_nvram_init(struct tg3 *tp)
10781{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010782 tw32_f(GRC_EEPROM_ADDR,
10783 (EEPROM_ADDR_FSM_RESET |
10784 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10785 EEPROM_ADDR_CLKPERD_SHIFT)));
10786
Michael Chan9d57f012006-12-07 00:23:25 -080010787 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010788
10789 /* Enable seeprom accesses. */
10790 tw32_f(GRC_LOCAL_CTRL,
10791 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10792 udelay(100);
10793
10794 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10795 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10796 tp->tg3_flags |= TG3_FLAG_NVRAM;
10797
Michael Chanec41c7d2006-01-17 02:40:55 -080010798 if (tg3_nvram_lock(tp)) {
10799			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10800 "tg3_nvram_init failed.\n", tp->dev->name);
10801 return;
10802 }
Michael Chane6af3012005-04-21 17:12:05 -070010803 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010804
Matt Carlson989a9d22007-05-05 11:51:05 -070010805 tp->nvram_size = 0;
10806
Michael Chan361b4ac2005-04-21 17:11:21 -070010807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10808 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010809 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10810 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010811 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10813 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010814 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010815 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10816 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010817 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10818 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010819 else
10820 tg3_get_nvram_info(tp);
10821
Matt Carlson989a9d22007-05-05 11:51:05 -070010822 if (tp->nvram_size == 0)
10823 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010824
Michael Chane6af3012005-04-21 17:12:05 -070010825 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010826 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010827
10828 } else {
10829 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10830
10831 tg3_get_eeprom_size(tp);
10832 }
10833}
10834
10835static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10836 u32 offset, u32 *val)
10837{
10838 u32 tmp;
10839 int i;
10840
10841 if (offset > EEPROM_ADDR_ADDR_MASK ||
10842 (offset % 4) != 0)
10843 return -EINVAL;
10844
10845 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10846 EEPROM_ADDR_DEVID_MASK |
10847 EEPROM_ADDR_READ);
10848 tw32(GRC_EEPROM_ADDR,
10849 tmp |
10850 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10851 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10852 EEPROM_ADDR_ADDR_MASK) |
10853 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10854
Michael Chan9d57f012006-12-07 00:23:25 -080010855 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010856 tmp = tr32(GRC_EEPROM_ADDR);
10857
10858 if (tmp & EEPROM_ADDR_COMPLETE)
10859 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010860 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010861 }
10862 if (!(tmp & EEPROM_ADDR_COMPLETE))
10863 return -EBUSY;
10864
10865 *val = tr32(GRC_EEPROM_DATA);
10866 return 0;
10867}
10868
10869#define NVRAM_CMD_TIMEOUT 10000
10870
10871static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10872{
10873 int i;
10874
10875 tw32(NVRAM_CMD, nvram_cmd);
10876 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10877 udelay(10);
10878 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10879 udelay(10);
10880 break;
10881 }
10882 }
10883 if (i == NVRAM_CMD_TIMEOUT) {
10884 return -EBUSY;
10885 }
10886 return 0;
10887}
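/* tg3_nvram_exec_cmd() polls for NVRAM_CMD_DONE in 10 usec steps for up
 * to NVRAM_CMD_TIMEOUT (10000) iterations, i.e. roughly 100 ms, before
 * giving up with -EBUSY.
 */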
10888
Michael Chan18201802006-03-20 22:29:15 -080010889static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10890{
10891 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10892 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10893 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010894 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010895 (tp->nvram_jedecnum == JEDEC_ATMEL))
10896
10897 addr = ((addr / tp->nvram_pagesize) <<
10898 ATMEL_AT45DB0X1B_PAGE_POS) +
10899 (addr % tp->nvram_pagesize);
10900
10901 return addr;
10902}
10903
Michael Chanc4e65752006-03-20 22:29:32 -080010904static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10905{
10906 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10907 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10908 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010909 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010910 (tp->nvram_jedecnum == JEDEC_ATMEL))
10911
10912 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10913 tp->nvram_pagesize) +
10914 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10915
10916 return addr;
10917}
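/* The two helpers above convert between the linear ("logical") byte
 * offsets used by the rest of the driver and the page:offset addresses
 * expected by buffered Atmel AT45DB parts, whose 264-byte pages are not
 * a power of two.  The translation is only applied for buffered Atmel
 * flash without TG3_FLG3_NO_NVRAM_ADDR_TRANS.  Assuming
 * ATMEL_AT45DB0X1B_PAGE_POS is 9 (enough bits to span a 264-byte page),
 * a logical offset of 1000 maps to the physical address
 * ((1000 / 264) << 9) + (1000 % 264) = 1536 + 208 = 0x6d0, and
 * tg3_nvram_logical_addr() performs the inverse mapping.
 */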
10918
Linus Torvalds1da177e2005-04-16 15:20:36 -070010919static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10920{
10921 int ret;
10922
Linus Torvalds1da177e2005-04-16 15:20:36 -070010923 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10924 return tg3_nvram_read_using_eeprom(tp, offset, val);
10925
Michael Chan18201802006-03-20 22:29:15 -080010926 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010927
10928 if (offset > NVRAM_ADDR_MSK)
10929 return -EINVAL;
10930
Michael Chanec41c7d2006-01-17 02:40:55 -080010931 ret = tg3_nvram_lock(tp);
10932 if (ret)
10933 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010934
Michael Chane6af3012005-04-21 17:12:05 -070010935 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010936
10937 tw32(NVRAM_ADDR, offset);
10938 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10939 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10940
10941 if (ret == 0)
10942 *val = swab32(tr32(NVRAM_RDDATA));
10943
Michael Chane6af3012005-04-21 17:12:05 -070010944 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010945
Michael Chan381291b2005-12-13 21:08:21 -080010946 tg3_nvram_unlock(tp);
10947
Linus Torvalds1da177e2005-04-16 15:20:36 -070010948 return ret;
10949}
10950
Al Virob9fc7dc2007-12-17 22:59:57 -080010951static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10952{
10953 u32 v;
10954 int res = tg3_nvram_read(tp, offset, &v);
10955 if (!res)
10956 *val = cpu_to_le32(v);
10957 return res;
10958}
10959
Michael Chan18201802006-03-20 22:29:15 -080010960static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10961{
10962 int err;
10963 u32 tmp;
10964
10965 err = tg3_nvram_read(tp, offset, &tmp);
10966 *val = swab32(tmp);
10967 return err;
10968}
10969
Linus Torvalds1da177e2005-04-16 15:20:36 -070010970static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10971 u32 offset, u32 len, u8 *buf)
10972{
10973 int i, j, rc = 0;
10974 u32 val;
10975
10976 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010977 u32 addr;
10978 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010979
10980 addr = offset + i;
10981
10982 memcpy(&data, buf + i, 4);
10983
Al Virob9fc7dc2007-12-17 22:59:57 -080010984 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010985
10986 val = tr32(GRC_EEPROM_ADDR);
10987 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10988
10989 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10990 EEPROM_ADDR_READ);
10991 tw32(GRC_EEPROM_ADDR, val |
10992 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10993 (addr & EEPROM_ADDR_ADDR_MASK) |
10994 EEPROM_ADDR_START |
10995 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010996
Michael Chan9d57f012006-12-07 00:23:25 -080010997 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010998 val = tr32(GRC_EEPROM_ADDR);
10999
11000 if (val & EEPROM_ADDR_COMPLETE)
11001 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011002 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011003 }
11004 if (!(val & EEPROM_ADDR_COMPLETE)) {
11005 rc = -EBUSY;
11006 break;
11007 }
11008 }
11009
11010 return rc;
11011}
11012
11013/* offset and length are dword aligned */
11014static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11015 u8 *buf)
11016{
11017 int ret = 0;
11018 u32 pagesize = tp->nvram_pagesize;
11019 u32 pagemask = pagesize - 1;
11020 u32 nvram_cmd;
11021 u8 *tmp;
11022
11023 tmp = kmalloc(pagesize, GFP_KERNEL);
11024 if (tmp == NULL)
11025 return -ENOMEM;
11026
11027 while (len) {
11028 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011029 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011030
11031 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011032
Linus Torvalds1da177e2005-04-16 15:20:36 -070011033 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011034 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011035 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011036 break;
11037 }
11038 if (ret)
11039 break;
11040
11041 page_off = offset & pagemask;
11042 size = pagesize;
11043 if (len < size)
11044 size = len;
11045
11046 len -= size;
11047
11048 memcpy(tmp + page_off, buf, size);
11049
11050 offset = offset + (pagesize - page_off);
11051
Michael Chane6af3012005-04-21 17:12:05 -070011052 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011053
11054 /*
11055 * Before we can erase the flash page, we need
11056 * to issue a special "write enable" command.
11057 */
11058 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11059
11060 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11061 break;
11062
11063 /* Erase the target page */
11064 tw32(NVRAM_ADDR, phy_addr);
11065
11066 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11067 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11068
11069 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11070 break;
11071
11072 /* Issue another write enable to start the write. */
11073 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11074
11075 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11076 break;
11077
11078 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011079 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011080
Al Virob9fc7dc2007-12-17 22:59:57 -080011081 data = *((__be32 *) (tmp + j));
11082 /* swab32(le32_to_cpu(data)), actually */
11083 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011084
11085 tw32(NVRAM_ADDR, phy_addr + j);
11086
11087 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11088 NVRAM_CMD_WR;
11089
11090 if (j == 0)
11091 nvram_cmd |= NVRAM_CMD_FIRST;
11092 else if (j == (pagesize - 4))
11093 nvram_cmd |= NVRAM_CMD_LAST;
11094
11095 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11096 break;
11097 }
11098 if (ret)
11099 break;
11100 }
11101
11102 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11103 tg3_nvram_exec_cmd(tp, nvram_cmd);
11104
11105 kfree(tmp);
11106
11107 return ret;
11108}
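/* Unbuffered flash writes above follow a read-modify-write sequence per
 * page: the whole nvram_pagesize-byte page is read into a scratch
 * buffer, the caller's data is merged in, and the part is then sent a
 * write-enable (WREN) plus a page ERASE, another WREN, and a
 * word-at-a-time program loop with NVRAM_CMD_FIRST on the first word
 * and NVRAM_CMD_LAST on the final word of the page.  A trailing WRDI
 * command clears the write-enable latch once the loop finishes.
 */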
11109
11110/* offset and length are dword aligned */
11111static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11112 u8 *buf)
11113{
11114 int i, ret = 0;
11115
11116 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011117 u32 page_off, phy_addr, nvram_cmd;
11118 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011119
11120 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011121 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011122
11123 page_off = offset % tp->nvram_pagesize;
11124
Michael Chan18201802006-03-20 22:29:15 -080011125 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011126
11127 tw32(NVRAM_ADDR, phy_addr);
11128
11129 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11130
11131 if ((page_off == 0) || (i == 0))
11132 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011133 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011134 nvram_cmd |= NVRAM_CMD_LAST;
11135
11136 if (i == (len - 4))
11137 nvram_cmd |= NVRAM_CMD_LAST;
11138
Michael Chan4c987482005-09-05 17:52:38 -070011139 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011140 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011141 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011142 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011143 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011144 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011145 (tp->nvram_jedecnum == JEDEC_ST) &&
11146 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011147
11148 if ((ret = tg3_nvram_exec_cmd(tp,
11149 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11150 NVRAM_CMD_DONE)))
11151
11152 break;
11153 }
11154 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11155 /* We always do complete word writes to eeprom. */
11156 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11157 }
11158
11159 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11160 break;
11161 }
11162 return ret;
11163}
11164
11165/* offset and length are dword aligned */
11166static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11167{
11168 int ret;
11169
Linus Torvalds1da177e2005-04-16 15:20:36 -070011170 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011171 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11172 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011173 udelay(40);
11174 }
11175
11176 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11177 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11178 }
11179 else {
11180 u32 grc_mode;
11181
Michael Chanec41c7d2006-01-17 02:40:55 -080011182 ret = tg3_nvram_lock(tp);
11183 if (ret)
11184 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011185
Michael Chane6af3012005-04-21 17:12:05 -070011186 tg3_enable_nvram_access(tp);
11187 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11188 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011189 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011190
11191 grc_mode = tr32(GRC_MODE);
11192 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11193
11194 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11195 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11196
11197 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11198 buf);
11199 }
11200 else {
11201 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11202 buf);
11203 }
11204
11205 grc_mode = tr32(GRC_MODE);
11206 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11207
Michael Chane6af3012005-04-21 17:12:05 -070011208 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011209 tg3_nvram_unlock(tp);
11210 }
11211
11212 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011213 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011214 udelay(40);
11215 }
11216
11217 return ret;
11218}
11219
11220struct subsys_tbl_ent {
11221 u16 subsys_vendor, subsys_devid;
11222 u32 phy_id;
11223};
11224
11225static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11226 /* Broadcom boards. */
11227 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11228 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11229 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11230 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11231 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11232 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11233 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11234 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11235 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11236 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11237 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11238
11239 /* 3com boards. */
11240 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11241 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11242 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11243 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11244 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11245
11246 /* DELL boards. */
11247 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11248 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11249 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11250 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11251
11252 /* Compaq boards. */
11253 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11254 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11255 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11256 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11257 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11258
11259 /* IBM boards. */
11260 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11261};
11262
11263static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11264{
11265 int i;
11266
11267 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11268 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11269 tp->pdev->subsystem_vendor) &&
11270 (subsys_id_to_phy_id[i].subsys_devid ==
11271 tp->pdev->subsystem_device))
11272 return &subsys_id_to_phy_id[i];
11273 }
11274 return NULL;
11275}
11276
Michael Chan7d0c41e2005-04-21 17:06:20 -070011277static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011278{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011279 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011280 u16 pmcsr;
11281
11282 /* On some early chips the SRAM cannot be accessed in D3hot state,
11283	 * so we need to make sure we're in D0.
11284 */
11285 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11286 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11287 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11288 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011289
11290 /* Make sure register accesses (indirect or otherwise)
11291 * will function correctly.
11292 */
11293 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11294 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011295
David S. Millerf49639e2006-06-09 11:58:36 -070011296 /* The memory arbiter has to be enabled in order for SRAM accesses
11297 * to succeed. Normally on powerup the tg3 chip firmware will make
11298 * sure it is enabled, but other entities such as system netboot
11299 * code might disable it.
11300 */
11301 val = tr32(MEMARB_MODE);
11302 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11303
Linus Torvalds1da177e2005-04-16 15:20:36 -070011304 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011305 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11306
Gary Zambranoa85feb82007-05-05 11:52:19 -070011307	/* Assume an onboard device and WOL capability by default. */
11308 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011309
Michael Chanb5d37722006-09-27 16:06:21 -070011310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011311 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011312 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011313 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11314 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011315 val = tr32(VCPU_CFGSHDW);
11316 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011317 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011318 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011319 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11320 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011321 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Michael Chanb5d37722006-09-27 16:06:21 -070011322 return;
11323 }
11324
Linus Torvalds1da177e2005-04-16 15:20:36 -070011325 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11326 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11327 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011328 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011329 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011330
11331 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11332 tp->nic_sram_data_cfg = nic_cfg;
11333
11334 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11335 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11336 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11337 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11338 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11339 (ver > 0) && (ver < 0x100))
11340 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11341
Matt Carlsona9daf362008-05-25 23:49:44 -070011342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11343 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11344
Linus Torvalds1da177e2005-04-16 15:20:36 -070011345 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11346 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11347 eeprom_phy_serdes = 1;
11348
11349 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11350 if (nic_phy_id != 0) {
11351 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11352 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11353
11354 eeprom_phy_id = (id1 >> 16) << 10;
11355 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11356 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11357 } else
11358 eeprom_phy_id = 0;
11359
Michael Chan7d0c41e2005-04-21 17:06:20 -070011360 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011361 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011363 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11364 else
11365 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11366 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011367
John W. Linvillecbf46852005-04-21 17:01:29 -070011368 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011369 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11370 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011371 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011372 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11373
11374 switch (led_cfg) {
11375 default:
11376 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11377 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11378 break;
11379
11380 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11381 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11382 break;
11383
11384 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11385 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011386
11387 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11388			 * read, as seen with some older 5700/5701 bootcode.
11389 */
11390 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11391 ASIC_REV_5700 ||
11392 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11393 ASIC_REV_5701)
11394 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11395
Linus Torvalds1da177e2005-04-16 15:20:36 -070011396 break;
11397
11398 case SHASTA_EXT_LED_SHARED:
11399 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11400 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11401 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11402 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11403 LED_CTRL_MODE_PHY_2);
11404 break;
11405
11406 case SHASTA_EXT_LED_MAC:
11407 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11408 break;
11409
11410 case SHASTA_EXT_LED_COMBO:
11411 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11412 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11413 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11414 LED_CTRL_MODE_PHY_2);
11415 break;
11416
Stephen Hemminger855e1112008-04-16 16:37:28 -070011417 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011418
11419 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11421 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11422 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11423
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011424 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11425 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011426
Michael Chan9d26e212006-12-07 00:21:14 -080011427 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011428 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011429 if ((tp->pdev->subsystem_vendor ==
11430 PCI_VENDOR_ID_ARIMA) &&
11431 (tp->pdev->subsystem_device == 0x205a ||
11432 tp->pdev->subsystem_device == 0x2063))
11433 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11434 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011435 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011436 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11437 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011438
11439 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11440 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011441 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011442 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11443 }
Matt Carlson0d3031d2007-10-10 18:02:43 -070011444 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
11445 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Gary Zambranoa85feb82007-05-05 11:52:19 -070011446 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11447 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11448 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011449
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011450 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11451 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11452 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011453 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11454
Linus Torvalds1da177e2005-04-16 15:20:36 -070011455 if (cfg2 & (1 << 17))
11456 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11457
11458		/* SerDes signal pre-emphasis in register 0x590 is set by the */
11459		/* bootcode if bit 18 is set. */
11460 if (cfg2 & (1 << 18))
11461 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011462
11463 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11464 u32 cfg3;
11465
11466 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11467 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11468 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11469 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011470
11471 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11472 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11473 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11474 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11475 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11476 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011477 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011478}
11479
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011480static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11481{
11482 int i;
11483 u32 val;
11484
11485 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11486 tw32(OTP_CTRL, cmd);
11487
11488 /* Wait for up to 1 ms for command to execute. */
11489 for (i = 0; i < 100; i++) {
11490 val = tr32(OTP_STATUS);
11491 if (val & OTP_STATUS_CMD_DONE)
11492 break;
11493 udelay(10);
11494 }
11495
11496 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11497}
11498
11499/* Read the gphy configuration from the OTP region of the chip. The gphy
11500 * configuration is a 32-bit value that straddles a 32-bit word boundary,
11501 * so we do two 32-bit reads and then shift and merge the results.
11502 */
11503static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11504{
11505 u32 bhalf_otp, thalf_otp;
11506
11507 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11508
11509 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11510 return 0;
11511
11512 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11513
11514 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11515 return 0;
11516
11517 thalf_otp = tr32(OTP_READ_DATA);
11518
11519 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11520
11521 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11522 return 0;
11523
11524 bhalf_otp = tr32(OTP_READ_DATA);
11525
11526 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11527}
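/* Worked example (illustration only, not part of the driver) of the merge
 * performed by tg3_read_otp_phycfg() above: with thalf_otp = 0x11112222
 * and bhalf_otp = 0x33334444,
 *
 *	((0x11112222 & 0x0000ffff) << 16) | (0x33334444 >> 16) = 0x22223333
 *
 * i.e. the low half of the first read and the high half of the second
 * read are joined to form the 32-bit gphy configuration.
 */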
11528
Michael Chan7d0c41e2005-04-21 17:06:20 -070011529static int __devinit tg3_phy_probe(struct tg3 *tp)
11530{
11531 u32 hw_phy_id_1, hw_phy_id_2;
11532 u32 hw_phy_id, hw_phy_id_masked;
11533 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011534
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011535 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11536 return tg3_phy_init(tp);
11537
Linus Torvalds1da177e2005-04-16 15:20:36 -070011538 /* Reading the PHY ID register can conflict with ASF
11539	 * firmware access to the PHY hardware.
11540 */
11541 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011542 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11543 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011544 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11545 } else {
11546 /* Now read the physical PHY_ID from the chip and verify
11547		 * that it is sane.  If it doesn't look good, we fall back
11548		 * to the PHY ID found in the eeprom area and, failing
11549		 * that, the hard-coded subsystem-ID table.
11550 */
11551 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11552 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11553
11554 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11555 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11556 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11557
11558 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11559 }
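	/* For reference (derived from the shifts above, illustration only):
	 * the assembled phy_id packs MII_PHYSID2[15:10] into bits 31:26,
	 * MII_PHYSID1[15:0] into bits 25:10 and MII_PHYSID2[9:0] into
	 * bits 9:0; hw_phy_id_masked then strips the low-order bits
	 * (presumably the PHY revision) via PHY_ID_MASK before the
	 * KNOWN_PHY_ID() check below.
	 */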
11560
11561 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11562 tp->phy_id = hw_phy_id;
11563 if (hw_phy_id_masked == PHY_ID_BCM8002)
11564 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011565 else
11566 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011567 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011568 if (tp->phy_id != PHY_ID_INVALID) {
11569 /* Do nothing, phy ID already set up in
11570 * tg3_get_eeprom_hw_cfg().
11571 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011572 } else {
11573 struct subsys_tbl_ent *p;
11574
11575 /* No eeprom signature? Try the hardcoded
11576 * subsys device table.
11577 */
11578 p = lookup_by_subsys(tp);
11579 if (!p)
11580 return -ENODEV;
11581
11582 tp->phy_id = p->phy_id;
11583 if (!tp->phy_id ||
11584 tp->phy_id == PHY_ID_BCM8002)
11585 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11586 }
11587 }
11588
Michael Chan747e8f82005-07-25 12:33:22 -070011589 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011590 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011591 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011592 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011593
11594 tg3_readphy(tp, MII_BMSR, &bmsr);
11595 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11596 (bmsr & BMSR_LSTATUS))
11597 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011598
Linus Torvalds1da177e2005-04-16 15:20:36 -070011599 err = tg3_phy_reset(tp);
11600 if (err)
11601 return err;
11602
11603 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11604 ADVERTISE_100HALF | ADVERTISE_100FULL |
11605 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11606 tg3_ctrl = 0;
11607 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11608 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11609 MII_TG3_CTRL_ADV_1000_FULL);
11610 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11611 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11612 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11613 MII_TG3_CTRL_ENABLE_AS_MASTER);
11614 }
11615
Michael Chan3600d912006-12-07 00:21:48 -080011616 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11617 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11618 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11619 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011620 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11621
11622 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11623 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11624
11625 tg3_writephy(tp, MII_BMCR,
11626 BMCR_ANENABLE | BMCR_ANRESTART);
11627 }
11628 tg3_phy_set_wirespeed(tp);
11629
11630 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11631 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11632 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11633 }
11634
11635skip_phy_reset:
11636 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11637 err = tg3_init_5401phy_dsp(tp);
11638 if (err)
11639 return err;
11640 }
11641
11642 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11643 err = tg3_init_5401phy_dsp(tp);
11644 }
11645
Michael Chan747e8f82005-07-25 12:33:22 -070011646 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011647 tp->link_config.advertising =
11648 (ADVERTISED_1000baseT_Half |
11649 ADVERTISED_1000baseT_Full |
11650 ADVERTISED_Autoneg |
11651 ADVERTISED_FIBRE);
11652 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11653 tp->link_config.advertising &=
11654 ~(ADVERTISED_1000baseT_Half |
11655 ADVERTISED_1000baseT_Full);
11656
11657 return err;
11658}
11659
11660static void __devinit tg3_read_partno(struct tg3 *tp)
11661{
11662 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011663 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011664 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011665
Michael Chan18201802006-03-20 22:29:15 -080011666 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011667 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011668
Michael Chan18201802006-03-20 22:29:15 -080011669 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011670 for (i = 0; i < 256; i += 4) {
11671 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011672
Michael Chan1b277772006-03-20 22:27:48 -080011673 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11674 goto out_not_found;
11675
11676 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11677 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11678 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11679 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11680 }
11681 } else {
11682 int vpd_cap;
11683
11684 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11685 for (i = 0; i < 256; i += 4) {
11686 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011687 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011688 u16 tmp16;
11689
11690 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11691 i);
11692 while (j++ < 100) {
11693 pci_read_config_word(tp->pdev, vpd_cap +
11694 PCI_VPD_ADDR, &tmp16);
11695 if (tmp16 & 0x8000)
11696 break;
11697 msleep(1);
11698 }
David S. Millerf49639e2006-06-09 11:58:36 -070011699 if (!(tmp16 & 0x8000))
11700 goto out_not_found;
11701
Michael Chan1b277772006-03-20 22:27:48 -080011702 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11703 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011704 v = cpu_to_le32(tmp);
11705 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011706 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011707 }
11708
11709 /* Now parse and find the part number. */
Michael Chanaf2c6a42006-11-07 14:57:51 -080011710 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011711 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011712 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011713
11714 if (val == 0x82 || val == 0x91) {
11715 i = (i + 3 +
11716 (vpd_data[i + 1] +
11717 (vpd_data[i + 2] << 8)));
11718 continue;
11719 }
11720
11721 if (val != 0x90)
11722 goto out_not_found;
11723
11724 block_end = (i + 3 +
11725 (vpd_data[i + 1] +
11726 (vpd_data[i + 2] << 8)));
11727 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011728
11729 if (block_end > 256)
11730 goto out_not_found;
11731
11732 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011733 if (vpd_data[i + 0] == 'P' &&
11734 vpd_data[i + 1] == 'N') {
11735 int partno_len = vpd_data[i + 2];
11736
Michael Chanaf2c6a42006-11-07 14:57:51 -080011737 i += 3;
11738 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011739 goto out_not_found;
11740
11741 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011742 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011743
11744 /* Success. */
11745 return;
11746 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011747 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011748 }
11749
11750 /* Part number not found. */
11751 goto out_not_found;
11752 }
11753
11754out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011755 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11756 strcpy(tp->board_part_number, "BCM95906");
11757 else
11758 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011759}
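/* For reference, a sketch of the standard PCI VPD resource layout that the
 * parser above assumes (illustration only, not data read from any board):
 *
 *	0x82 len_lo len_hi <identifier string>		(skipped)
 *	0x91 len_lo len_hi <read/write fields>		(skipped)
 *	0x90 len_lo len_hi				(VPD-R, parsed)
 *	    'P' 'N' len <part number bytes>
 *	    <other 2-char keyword> len <data>
 *
 * Large-resource lengths are 16-bit little-endian; each keyword entry
 * inside VPD-R carries a single length byte.
 */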
11760
Matt Carlson9c8a6202007-10-21 16:16:08 -070011761static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11762{
11763 u32 val;
11764
11765 if (tg3_nvram_read_swab(tp, offset, &val) ||
11766 (val & 0xfc000000) != 0x0c000000 ||
11767 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11768 val != 0)
11769 return 0;
11770
11771 return 1;
11772}
11773
Michael Chanc4e65752006-03-20 22:29:32 -080011774static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11775{
11776 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011777 u32 ver_offset;
11778 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011779
11780 if (tg3_nvram_read_swab(tp, 0, &val))
11781 return;
11782
11783 if (val != TG3_EEPROM_MAGIC)
11784 return;
11785
11786 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11787 tg3_nvram_read_swab(tp, 0x4, &start))
11788 return;
11789
11790 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011791
11792 if (!tg3_fw_img_is_valid(tp, offset) ||
11793 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011794 return;
11795
Matt Carlson9c8a6202007-10-21 16:16:08 -070011796 offset = offset + ver_offset - start;
11797 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011798 __le32 v;
11799 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011800 return;
11801
Al Virob9fc7dc2007-12-17 22:59:57 -080011802 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011803 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011804
11805 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011806 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011807 return;
11808
11809 for (offset = TG3_NVM_DIR_START;
11810 offset < TG3_NVM_DIR_END;
11811 offset += TG3_NVM_DIRENT_SIZE) {
11812 if (tg3_nvram_read_swab(tp, offset, &val))
11813 return;
11814
11815 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11816 break;
11817 }
11818
11819 if (offset == TG3_NVM_DIR_END)
11820 return;
11821
11822 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11823 start = 0x08000000;
11824 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11825 return;
11826
11827 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11828 !tg3_fw_img_is_valid(tp, offset) ||
11829 tg3_nvram_read_swab(tp, offset + 8, &val))
11830 return;
11831
11832 offset += val - start;
11833
11834 bcnt = strlen(tp->fw_ver);
11835
11836 tp->fw_ver[bcnt++] = ',';
11837 tp->fw_ver[bcnt++] = ' ';
11838
11839 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011840 __le32 v;
11841 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011842 return;
11843
Al Virob9fc7dc2007-12-17 22:59:57 -080011844 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011845
Al Virob9fc7dc2007-12-17 22:59:57 -080011846 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11847 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011848 break;
11849 }
11850
Al Virob9fc7dc2007-12-17 22:59:57 -080011851 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11852 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011853 }
11854
11855 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011856}
11857
Michael Chan7544b092007-05-05 13:08:32 -070011858static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11859
Linus Torvalds1da177e2005-04-16 15:20:36 -070011860static int __devinit tg3_get_invariants(struct tg3 *tp)
11861{
11862 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011863 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11864 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011865 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11866 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011867 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11868 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011869 { },
11870 };
11871 u32 misc_ctrl_reg;
11872 u32 cacheline_sz_reg;
11873 u32 pci_state_reg, grc_misc_cfg;
11874 u32 val;
11875 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011876 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011877
Linus Torvalds1da177e2005-04-16 15:20:36 -070011878 /* Force memory write invalidate off. If we leave it on,
11879 * then on 5700_BX chips we have to enable a workaround.
11880 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11881	 * to match the cacheline size.  The Broadcom driver has this
11882	 * workaround but turns MWI off at all times, so it never uses
11883	 * it.  This seems to suggest that the workaround is insufficient.
11884 */
11885 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11886 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11887 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11888
11889 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11890 * has the register indirect write enable bit set before
11891 * we try to access any of the MMIO registers. It is also
11892 * critical that the PCI-X hw workaround situation is decided
11893 * before that as well.
11894 */
11895 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11896 &misc_ctrl_reg);
11897
11898 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11899 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11901 u32 prod_id_asic_rev;
11902
11903 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11904 &prod_id_asic_rev);
11905 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11906 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011907
Michael Chanff645be2005-04-21 17:09:53 -070011908 /* Wrong chip ID in 5752 A0. This code can be removed later
11909 * as A0 is not in production.
11910 */
11911 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11912 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11913
Michael Chan68929142005-08-09 20:17:14 -070011914 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11915 * we need to disable memory and use config. cycles
11916 * only to access all registers. The 5702/03 chips
11917 * can mistakenly decode the special cycles from the
11918 * ICH chipsets as memory write cycles, causing corruption
11919 * of register and memory space. Only certain ICH bridges
11920 * will drive special cycles with non-zero data during the
11921 * address phase which can fall within the 5703's address
11922 * range. This is not an ICH bug as the PCI spec allows
11923 * non-zero address during special cycles. However, only
11924 * these ICH bridges are known to drive non-zero addresses
11925 * during special cycles.
11926 *
11927 * Since special cycles do not cross PCI bridges, we only
11928 * enable this workaround if the 5703 is on the secondary
11929 * bus of these ICH bridges.
11930 */
11931 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11932 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11933 static struct tg3_dev_id {
11934 u32 vendor;
11935 u32 device;
11936 u32 rev;
11937 } ich_chipsets[] = {
11938 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11939 PCI_ANY_ID },
11940 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11941 PCI_ANY_ID },
11942 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11943 0xa },
11944 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11945 PCI_ANY_ID },
11946 { },
11947 };
11948 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11949 struct pci_dev *bridge = NULL;
11950
11951 while (pci_id->vendor != 0) {
11952 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11953 bridge);
11954 if (!bridge) {
11955 pci_id++;
11956 continue;
11957 }
11958 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070011959 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070011960 continue;
11961 }
11962 if (bridge->subordinate &&
11963 (bridge->subordinate->number ==
11964 tp->pdev->bus->number)) {
11965
11966 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11967 pci_dev_put(bridge);
11968 break;
11969 }
11970 }
11971 }
11972
Matt Carlson41588ba2008-04-19 18:12:33 -070011973 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11974 static struct tg3_dev_id {
11975 u32 vendor;
11976 u32 device;
11977 } bridge_chipsets[] = {
11978 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11979 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11980 { },
11981 };
11982 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11983 struct pci_dev *bridge = NULL;
11984
11985 while (pci_id->vendor != 0) {
11986 bridge = pci_get_device(pci_id->vendor,
11987 pci_id->device,
11988 bridge);
11989 if (!bridge) {
11990 pci_id++;
11991 continue;
11992 }
11993 if (bridge->subordinate &&
11994 (bridge->subordinate->number <=
11995 tp->pdev->bus->number) &&
11996 (bridge->subordinate->subordinate >=
11997 tp->pdev->bus->number)) {
11998 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11999 pci_dev_put(bridge);
12000 break;
12001 }
12002 }
12003 }
12004
Michael Chan4a29cc22006-03-19 13:21:12 -080012005 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12006 * DMA addresses > 40-bit. This bridge may have other additional
12007 * 57xx devices behind it in some 4-port NIC designs for example.
12008 * Any tg3 device found behind the bridge will also need the 40-bit
12009 * DMA workaround.
12010 */
Michael Chana4e2b342005-10-26 15:46:52 -070012011 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12013 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012014 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012015 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012016 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012017 else {
12018 struct pci_dev *bridge = NULL;
12019
12020 do {
12021 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12022 PCI_DEVICE_ID_SERVERWORKS_EPB,
12023 bridge);
12024 if (bridge && bridge->subordinate &&
12025 (bridge->subordinate->number <=
12026 tp->pdev->bus->number) &&
12027 (bridge->subordinate->subordinate >=
12028 tp->pdev->bus->number)) {
12029 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12030 pci_dev_put(bridge);
12031 break;
12032 }
12033 } while (bridge);
12034 }
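	/* Note (an assumption about how this flag is consumed later in the
	 * probe path, shown for illustration only): TG3_FLAG_40BIT_DMA_BUG
	 * typically ends up capping the DMA mask at 40 bits, roughly
	 *
	 *	pci_set_dma_mask(tp->pdev, DMA_40BIT_MASK);
	 *
	 * so that no descriptor or buffer address presented to the EPB
	 * bridge exceeds 40 bits.
	 */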
Michael Chan4cf78e42005-07-25 12:29:19 -070012035
Linus Torvalds1da177e2005-04-16 15:20:36 -070012036 /* Initialize misc host control in PCI block. */
12037 tp->misc_host_ctrl |= (misc_ctrl_reg &
12038 MISC_HOST_CTRL_CHIPREV);
12039 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12040 tp->misc_host_ctrl);
12041
12042 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12043 &cacheline_sz_reg);
12044
12045 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12046 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12047 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12048 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12049
Michael Chan7544b092007-05-05 13:08:32 -070012050 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12051 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12052 tp->pdev_peer = tg3_find_peer(tp);
12053
John W. Linville2052da92005-04-21 16:56:08 -070012054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012062 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012063 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12064
John W. Linville1b440c562005-04-21 17:03:18 -070012065 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12066 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12067 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12068
Michael Chan5a6f3072006-03-20 22:28:05 -080012069 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012070 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12071 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12072 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12073 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12074 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12075 tp->pdev_peer == tp->pdev))
12076 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12077
Michael Chanaf36e6b2006-03-23 01:28:06 -080012078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012084 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012085 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012086 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012087 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012088 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12089 ASIC_REV_5750 &&
12090 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012091 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012092 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012093 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012094
Matt Carlsonf51f3562008-05-25 23:45:08 -070012095 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12096 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012097 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12098
Michael Chanc7835a72006-11-15 21:14:42 -080012099 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12100 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012101 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012102
12103 pcie_set_readrq(tp->pdev, 4096);
12104
Michael Chanc7835a72006-11-15 21:14:42 -080012105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12106 u16 lnkctl;
12107
12108 pci_read_config_word(tp->pdev,
12109 pcie_cap + PCI_EXP_LNKCTL,
12110 &lnkctl);
12111 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12112 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12113 }
12114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012115
Michael Chan399de502005-10-03 14:02:39 -070012116 /* If we have an AMD 762 or VIA K8T800 chipset, write
12117 * reordering to the mailbox registers done by the host
12118 * controller can cause major troubles. We read back from
12119 * every mailbox register write to force the writes to be
12120 * posted to the chip in order.
12121 */
12122 if (pci_dev_present(write_reorder_chipsets) &&
12123 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12124 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12125
Linus Torvalds1da177e2005-04-16 15:20:36 -070012126 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12127 tp->pci_lat_timer < 64) {
12128 tp->pci_lat_timer = 64;
12129
12130 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12131 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12132 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12133 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12134
12135 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12136 cacheline_sz_reg);
12137 }
12138
Matt Carlson9974a352007-10-07 23:27:28 -070012139 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12140 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12141 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12142 if (!tp->pcix_cap) {
12143 printk(KERN_ERR PFX "Cannot find PCI-X "
12144 "capability, aborting.\n");
12145 return -EIO;
12146 }
12147 }
12148
Linus Torvalds1da177e2005-04-16 15:20:36 -070012149 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12150 &pci_state_reg);
12151
Matt Carlson9974a352007-10-07 23:27:28 -070012152 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012153 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12154
12155 /* If this is a 5700 BX chipset, and we are in PCI-X
12156 * mode, enable register write workaround.
12157 *
12158 * The workaround is to use indirect register accesses
12159 * for all chip writes not to mailbox registers.
12160 */
12161 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12162 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012163
12164 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12165
12166			/* The chip can have its power management PCI config
12167 * space registers clobbered due to this bug.
12168 * So explicitly force the chip into D0 here.
12169 */
Matt Carlson9974a352007-10-07 23:27:28 -070012170 pci_read_config_dword(tp->pdev,
12171 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012172 &pm_reg);
12173 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12174 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012175 pci_write_config_dword(tp->pdev,
12176 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012177 pm_reg);
12178
12179 /* Also, force SERR#/PERR# in PCI command. */
12180 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12181 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12182 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12183 }
12184 }
12185
Michael Chan087fe252005-08-09 20:17:41 -070012186 /* 5700 BX chips need to have their TX producer index mailboxes
12187	 * written twice to work around a bug.
12188 */
12189 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12190 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12191
Linus Torvalds1da177e2005-04-16 15:20:36 -070012192 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12193 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12194 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12195 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12196
12197 /* Chip-specific fixup from Broadcom driver */
12198 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12199 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12200 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12201 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12202 }
12203
Michael Chan1ee582d2005-08-09 20:16:46 -070012204 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012205 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012206 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012207 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012208 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012209 tp->write32_tx_mbox = tg3_write32;
12210 tp->write32_rx_mbox = tg3_write32;
12211
12212 /* Various workaround register access methods */
12213 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12214 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012215 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12216 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12217 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12218 /*
12219 * Back to back register writes can cause problems on these
12220		 * chips; the workaround is to read back all reg writes
12221 * except those to mailbox regs.
12222 *
12223 * See tg3_write_indirect_reg32().
12224 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012225 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012226 }
12227
Michael Chan1ee582d2005-08-09 20:16:46 -070012228
12229 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12230 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12231 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12232 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12233 tp->write32_rx_mbox = tg3_write_flush_reg32;
12234 }
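	/* Sketch of the workaround accessors selected above (an approximation
	 * for illustration, not their exact bodies): the TX mailbox bug is
	 * handled by writing the producer index twice, and the reorder bug by
	 * reading the mailbox back so the posted write reaches the chip before
	 * any later write:
	 *
	 *	writel(val, mbox);
	 *	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
	 *		writel(val, mbox);
	 *	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
	 *		readl(mbox);
	 */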
Michael Chan20094932005-08-09 20:16:32 -070012235
Michael Chan68929142005-08-09 20:17:14 -070012236 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12237 tp->read32 = tg3_read_indirect_reg32;
12238 tp->write32 = tg3_write_indirect_reg32;
12239 tp->read32_mbox = tg3_read_indirect_mbox;
12240 tp->write32_mbox = tg3_write_indirect_mbox;
12241 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12242 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12243
12244 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012245 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012246
12247 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12248 pci_cmd &= ~PCI_COMMAND_MEMORY;
12249 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12250 }
Michael Chanb5d37722006-09-27 16:06:21 -070012251 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12252 tp->read32_mbox = tg3_read32_mbox_5906;
12253 tp->write32_mbox = tg3_write32_mbox_5906;
12254 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12255 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12256 }
Michael Chan68929142005-08-09 20:17:14 -070012257
Michael Chanbbadf502006-04-06 21:46:34 -070012258 if (tp->write32 == tg3_write_indirect_reg32 ||
12259 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12260 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012262 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12263
Michael Chan7d0c41e2005-04-21 17:06:20 -070012264 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012265 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012266 * determined before calling tg3_set_power_state() so that
12267 * we know whether or not to switch out of Vaux power.
12268	 * When the flag is clear, it means that GPIO1 is used for eeprom
12269 * write protect and also implies that it is a LOM where GPIOs
12270 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012271 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012272 tg3_get_eeprom_hw_cfg(tp);
12273
Matt Carlson0d3031d2007-10-10 18:02:43 -070012274 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12275 /* Allow reads and writes to the
12276 * APE register and memory space.
12277 */
12278 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12279 PCISTATE_ALLOW_APE_SHMEM_WR;
12280 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12281 pci_state_reg);
12282 }
12283
Matt Carlson9936bcf2007-10-10 18:03:07 -070012284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlsond30cdd22007-10-07 23:28:35 -070012287 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12288
Matt Carlsonb5af7122007-11-12 21:22:02 -080012289 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12290 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12291 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12292 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12293 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12294 }
12295
Michael Chan314fba32005-04-21 17:07:04 -070012296 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12297 * GPIO1 driven high will bring 5700's external PHY out of reset.
12298 * It is also used as eeprom write protect on LOMs.
12299 */
12300 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12301 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12302 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12303 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12304 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012305 /* Unused GPIO3 must be driven as output on 5752 because there
12306 * are no pull-up resistors on unused GPIO pins.
12307 */
12308 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12309 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012310
Michael Chanaf36e6b2006-03-23 01:28:06 -080012311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12312 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12313
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012314 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12315 /* Turn off the debug UART. */
12316 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12317 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12318 /* Keep VMain power. */
12319 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12320 GRC_LCLCTRL_GPIO_OUTPUT0;
12321 }
12322
Linus Torvalds1da177e2005-04-16 15:20:36 -070012323 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012324 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012325 if (err) {
12326 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12327 pci_name(tp->pdev));
12328 return err;
12329 }
12330
12331 /* 5700 B0 chips do not support checksumming correctly due
12332 * to hardware bugs.
12333 */
12334 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12335 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12336
Linus Torvalds1da177e2005-04-16 15:20:36 -070012337 /* Derive initial jumbo mode from MTU assigned in
12338 * ether_setup() via the alloc_etherdev() call
12339 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012340 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012341 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012342 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012343
12344 /* Determine WakeOnLan speed to use. */
12345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12346 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12347 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12348 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12349 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12350 } else {
12351 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12352 }
12353
12354 /* A few boards don't want Ethernet@WireSpeed phy feature */
12355 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12356 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12357 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012358 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012359 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012360 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012361 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12362
12363 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12364 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12365 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12366 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12367 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12368
Michael Chanc424cb22006-04-29 18:56:34 -070012369 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012372 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12373 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012374 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12375 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12376 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012377 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12378 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012379 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12380 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012381 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12382 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012383
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012384 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12385 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12386 tp->phy_otp = tg3_read_otp_phycfg(tp);
12387 if (tp->phy_otp == 0)
12388 tp->phy_otp = TG3_OTP_DEFAULT;
12389 }
12390
Matt Carlsonf51f3562008-05-25 23:45:08 -070012391 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012392 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12393 else
12394 tp->mi_mode = MAC_MI_MODE_BASE;
12395
Linus Torvalds1da177e2005-04-16 15:20:36 -070012396 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012397 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12398 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12399 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12400
Matt Carlson57e69832008-05-25 23:48:31 -070012401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12402 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12403
Matt Carlson158d7ab2008-05-29 01:37:54 -070012404 err = tg3_mdio_init(tp);
12405 if (err)
12406 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012407
12408 /* Initialize data/descriptor byte/word swapping. */
12409 val = tr32(GRC_MODE);
12410 val &= GRC_MODE_HOST_STACKUP;
12411 tw32(GRC_MODE, val | tp->grc_mode);
12412
12413 tg3_switch_clocks(tp);
12414
12415 /* Clear this out for sanity. */
12416 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12417
12418 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12419 &pci_state_reg);
12420 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12421 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12422 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12423
12424 if (chiprevid == CHIPREV_ID_5701_A0 ||
12425 chiprevid == CHIPREV_ID_5701_B0 ||
12426 chiprevid == CHIPREV_ID_5701_B2 ||
12427 chiprevid == CHIPREV_ID_5701_B5) {
12428 void __iomem *sram_base;
12429
12430 /* Write some dummy words into the SRAM status block
12431			 * area and see if they read back correctly.  If the
12432			 * readback is bad, force-enable the PCI-X workaround.
12433 */
12434 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12435
12436 writel(0x00000000, sram_base);
12437 writel(0x00000000, sram_base + 4);
12438 writel(0xffffffff, sram_base + 4);
12439 if (readl(sram_base) != 0x00000000)
12440 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12441 }
12442 }
12443
12444 udelay(50);
12445 tg3_nvram_init(tp);
12446
12447 grc_misc_cfg = tr32(GRC_MISC_CFG);
12448 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12449
Linus Torvalds1da177e2005-04-16 15:20:36 -070012450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12451 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12452 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12453 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12454
David S. Millerfac9b832005-05-18 22:46:34 -070012455 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12456 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12457 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12458 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12459 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12460 HOSTCC_MODE_CLRTICK_TXBD);
12461
12462 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12463 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12464 tp->misc_host_ctrl);
12465 }
12466
Matt Carlson3bda1252008-08-15 14:08:22 -070012467 /* Preserve the APE MAC_MODE bits */
12468 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12469 tp->mac_mode = tr32(MAC_MODE) |
12470 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12471 else
12472 tp->mac_mode = TG3_DEF_MAC_MODE;
12473
Linus Torvalds1da177e2005-04-16 15:20:36 -070012474 /* these are limited to 10/100 only */
12475 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12476 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12477 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12478 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12479 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12480 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12481 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12482 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12483 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012484 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12485 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012486 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012487 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12488
12489 err = tg3_phy_probe(tp);
12490 if (err) {
12491 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12492 pci_name(tp->pdev), err);
12493 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012494 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012495 }
12496
12497 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012498 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012499
12500 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12501 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12502 } else {
12503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12504 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12505 else
12506 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12507 }
12508
12509 /* 5700 {AX,BX} chips have a broken status block link
12510 * change bit implementation, so we must use the
12511 * status register in those cases.
12512 */
12513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12514 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12515 else
12516 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12517
12518	/* The led_ctrl is set during tg3_phy_probe; here we might
12519 * have to force the link status polling mechanism based
12520 * upon subsystem IDs.
12521 */
12522 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012524 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12525 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12526 TG3_FLAG_USE_LINKCHG_REG);
12527 }
12528
12529 /* For all SERDES we poll the MAC status register. */
12530 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12531 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12532 else
12533 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12534
Michael Chan5a6f3072006-03-20 22:28:05 -080012535 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070012536 * straddle the 4GB address boundary in some cases.
12537 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080012538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012540 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080012544 tp->dev->hard_start_xmit = tg3_start_xmit;
12545 else
12546 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
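	/* Illustration (an assumption about the test the DMA-bug transmit
	 * path performs, not its exact code): a buffer of length "len"
	 * mapped at DMA address "mapping" straddles a 4GB boundary when
	 *
	 *	(mapping >> 32) != ((mapping + len - 1) >> 32)
	 *
	 * and such buffers need the tg3_start_xmit_dma_bug() handling.
	 */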
Linus Torvalds1da177e2005-04-16 15:20:36 -070012547
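	/* The 2-byte RX offset below keeps the received IP header 4-byte
	 * aligned (NET_IP_ALIGN style); the 5701 in PCI-X mode reportedly
	 * cannot DMA to 2-byte-aligned receive buffers, so it runs unpadded.
	 */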
12548 tp->rx_offset = 2;
12549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12550 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12551 tp->rx_offset = 0;
12552
Michael Chanf92905d2006-06-29 20:14:29 -070012553 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12554
12555 /* Increment the rx prod index on the rx std ring by at most
12556	 * 8 for these chips to work around hw errata.
12557 */
12558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12559 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12560 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12561 tp->rx_std_max_post = 8;
12562
Matt Carlson8ed5d972007-05-07 00:25:49 -070012563 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12564 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12565 PCIE_PWR_MGMT_L1_THRESH_MSK;
12566
Linus Torvalds1da177e2005-04-16 15:20:36 -070012567 return err;
12568}
12569
David S. Miller49b6e95f2007-03-29 01:38:42 -070012570#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012571static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12572{
12573 struct net_device *dev = tp->dev;
12574 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012575 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012576 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012577 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012578
David S. Miller49b6e95f2007-03-29 01:38:42 -070012579 addr = of_get_property(dp, "local-mac-address", &len);
12580 if (addr && len == 6) {
12581 memcpy(dev->dev_addr, addr, 6);
12582 memcpy(dev->perm_addr, dev->dev_addr, 6);
12583 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012584 }
12585 return -ENODEV;
12586}
12587
12588static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12589{
12590 struct net_device *dev = tp->dev;
12591
12592 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012593 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012594 return 0;
12595}
12596#endif
12597
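/* Determine the station address, trying sources in decreasing order of
 * preference: SPARC platform firmware, the NIC SRAM mailbox written by
 * bootcode, NVRAM at a chip-specific offset, and finally whatever is
 * already latched in the MAC_ADDR_0 registers.
 */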
12598static int __devinit tg3_get_device_address(struct tg3 *tp)
12599{
12600 struct net_device *dev = tp->dev;
12601 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012602 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012603
David S. Miller49b6e95f2007-03-29 01:38:42 -070012604#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012605 if (!tg3_get_macaddr_sparc(tp))
12606 return 0;
12607#endif
12608
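	/* Default NVRAM offset of the MAC address.  Dual-MAC parts (5704 and
	 * the 5780 class) use 0xcc when DUAL_MAC_CTRL_ID marks this as the
	 * second MAC, and the 5906 stores its address at 0x10 instead.
	 */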
12609 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012610 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012611 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012612 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12613 mac_offset = 0xcc;
12614 if (tg3_nvram_lock(tp))
12615 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12616 else
12617 tg3_nvram_unlock(tp);
12618 }
Michael Chanb5d37722006-09-27 16:06:21 -070012619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12620 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012621
12622 /* First try to get it from MAC address mailbox. */
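	/* The mailbox layout appears to be: the high word holds an 0x484b
	 * ("HK") signature in its upper 16 bits plus the first two address
	 * bytes, and the low word holds the remaining four bytes, most
	 * significant byte first.
	 */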
12623 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12624 if ((hi >> 16) == 0x484b) {
12625 dev->dev_addr[0] = (hi >> 8) & 0xff;
12626 dev->dev_addr[1] = (hi >> 0) & 0xff;
12627
12628 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12629 dev->dev_addr[2] = (lo >> 24) & 0xff;
12630 dev->dev_addr[3] = (lo >> 16) & 0xff;
12631 dev->dev_addr[4] = (lo >> 8) & 0xff;
12632 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012633
Michael Chan008652b2006-03-27 23:14:53 -080012634 /* Some old bootcode may report a 0 MAC address in SRAM */
12635 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12636 }
12637 if (!addr_ok) {
12638 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012639 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012640 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12641 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12642 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12643 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12644 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12645 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12646 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12647 }
12648 /* Finally just fetch it out of the MAC control regs. */
12649 else {
12650 hi = tr32(MAC_ADDR_0_HIGH);
12651 lo = tr32(MAC_ADDR_0_LOW);
12652
12653 dev->dev_addr[5] = lo & 0xff;
12654 dev->dev_addr[4] = (lo >> 8) & 0xff;
12655 dev->dev_addr[3] = (lo >> 16) & 0xff;
12656 dev->dev_addr[2] = (lo >> 24) & 0xff;
12657 dev->dev_addr[1] = hi & 0xff;
12658 dev->dev_addr[0] = (hi >> 8) & 0xff;
12659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012660 }
12661
12662 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012663#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012664 if (!tg3_get_default_macaddr_sparc(tp))
12665 return 0;
12666#endif
12667 return -EINVAL;
12668 }
John W. Linville2ff43692005-09-12 14:44:20 -070012669 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012670 return 0;
12671}
12672
David S. Miller59e6b432005-05-18 22:50:10 -070012673#define BOUNDARY_SINGLE_CACHELINE 1
12674#define BOUNDARY_MULTI_CACHELINE 2
12675
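/* Choose DMA read/write burst-boundary bits for DMA_RW_CTRL based on the
 * PCI cache line size and the host architecture.  Some platforms want
 * bursts stopped at every cache line, others at a larger multiple; a
 * goal of 0 leaves the chip defaults untouched.
 */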
12676static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12677{
12678 int cacheline_size;
12679 u8 byte;
12680 int goal;
12681
12682 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12683 if (byte == 0)
12684 cacheline_size = 1024;
12685 else
12686 cacheline_size = (int) byte * 4;
12687
12688 /* On 5703 and later chips, the boundary bits have no
12689 * effect.
12690 */
12691 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12692 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12693 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12694 goto out;
12695
12696#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12697 goal = BOUNDARY_MULTI_CACHELINE;
12698#else
12699#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12700 goal = BOUNDARY_SINGLE_CACHELINE;
12701#else
12702 goal = 0;
12703#endif
12704#endif
12705
12706 if (!goal)
12707 goto out;
12708
12709 /* PCI controllers on most RISC systems tend to disconnect
12710 * when a device tries to burst across a cache-line boundary.
12711 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12712 *
12713 * Unfortunately, for PCI-E there are only limited
12714 * write-side controls for this, and thus for reads
12715 * we will still get the disconnects. We'll also waste
12716 * these PCI cycles for both read and write for chips
12717 	 * other than 5700 and 5701, which do not implement the
12718 * boundary bits.
12719 */
12720 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12721 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12722 switch (cacheline_size) {
12723 case 16:
12724 case 32:
12725 case 64:
12726 case 128:
12727 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12728 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12729 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12730 } else {
12731 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12732 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12733 }
12734 break;
12735
12736 case 256:
12737 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12738 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12739 break;
12740
12741 default:
12742 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12743 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12744 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012745 }
David S. Miller59e6b432005-05-18 22:50:10 -070012746 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12747 switch (cacheline_size) {
12748 case 16:
12749 case 32:
12750 case 64:
12751 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12752 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12753 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12754 break;
12755 }
12756 /* fallthrough */
12757 case 128:
12758 default:
12759 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12760 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12761 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012762 }
David S. Miller59e6b432005-05-18 22:50:10 -070012763 } else {
12764 switch (cacheline_size) {
12765 case 16:
12766 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12767 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12768 DMA_RWCTRL_WRITE_BNDRY_16);
12769 break;
12770 }
12771 /* fallthrough */
12772 case 32:
12773 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12774 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12775 DMA_RWCTRL_WRITE_BNDRY_32);
12776 break;
12777 }
12778 /* fallthrough */
12779 case 64:
12780 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12781 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12782 DMA_RWCTRL_WRITE_BNDRY_64);
12783 break;
12784 }
12785 /* fallthrough */
12786 case 128:
12787 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12788 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12789 DMA_RWCTRL_WRITE_BNDRY_128);
12790 break;
12791 }
12792 /* fallthrough */
12793 case 256:
12794 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12795 DMA_RWCTRL_WRITE_BNDRY_256);
12796 break;
12797 case 512:
12798 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12799 DMA_RWCTRL_WRITE_BNDRY_512);
12800 break;
12801 case 1024:
12802 default:
12803 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12804 DMA_RWCTRL_WRITE_BNDRY_1024);
12805 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012806 }
David S. Miller59e6b432005-05-18 22:50:10 -070012807 }
12808
12809out:
12810 return val;
12811}
12812
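/* Run a single host<->NIC DMA transfer of 'size' bytes through the
 * chip's internal descriptor machinery: write a test descriptor into
 * NIC SRAM, kick the read (to_device) or write DMA engine, then poll
 * the matching completion FIFO for up to ~4ms.  Returns 0 on
 * completion, -ENODEV on timeout.
 */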
Linus Torvalds1da177e2005-04-16 15:20:36 -070012813static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12814{
12815 struct tg3_internal_buffer_desc test_desc;
12816 u32 sram_dma_descs;
12817 int i, ret;
12818
12819 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12820
12821 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12822 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12823 tw32(RDMAC_STATUS, 0);
12824 tw32(WDMAC_STATUS, 0);
12825
12826 tw32(BUFMGR_MODE, 0);
12827 tw32(FTQ_RESET, 0);
12828
12829 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12830 test_desc.addr_lo = buf_dma & 0xffffffff;
12831 test_desc.nic_mbuf = 0x00002100;
12832 test_desc.len = size;
12833
12834 /*
12835 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12836 	 * the *second* time the tg3 driver was loaded after an
12837 * initial scan.
12838 *
12839 * Broadcom tells me:
12840 * ...the DMA engine is connected to the GRC block and a DMA
12841 * reset may affect the GRC block in some unpredictable way...
12842 * The behavior of resets to individual blocks has not been tested.
12843 *
12844 * Broadcom noted the GRC reset will also reset all sub-components.
12845 */
12846 if (to_device) {
12847 test_desc.cqid_sqid = (13 << 8) | 2;
12848
12849 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12850 udelay(40);
12851 } else {
12852 test_desc.cqid_sqid = (16 << 8) | 7;
12853
12854 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12855 udelay(40);
12856 }
12857 test_desc.flags = 0x00000005;
12858
12859 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12860 u32 val;
12861
12862 val = *(((u32 *)&test_desc) + i);
12863 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12864 sram_dma_descs + (i * sizeof(u32)));
12865 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12866 }
12867 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12868
12869 if (to_device) {
12870 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12871 } else {
12872 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12873 }
12874
12875 ret = -ENODEV;
12876 for (i = 0; i < 40; i++) {
12877 u32 val;
12878
12879 if (to_device)
12880 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12881 else
12882 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12883 if ((val & 0xffff) == sram_dma_descs) {
12884 ret = 0;
12885 break;
12886 }
12887
12888 udelay(100);
12889 }
12890
12891 return ret;
12892}
12893
David S. Millerded73402005-05-23 13:59:47 -070012894#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012895
12896static int __devinit tg3_test_dma(struct tg3 *tp)
12897{
12898 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012899 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012900 int ret;
12901
12902 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12903 if (!buf) {
12904 ret = -ENOMEM;
12905 goto out_nofree;
12906 }
12907
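	/* Seed dma_rwctrl with PCI write command 0x7 and read command 0x6;
	 * the bus- and chip-specific watermark and boundary bits are ORed in
	 * below.
	 */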
12908 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12909 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12910
David S. Miller59e6b432005-05-18 22:50:10 -070012911 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012912
12913 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12914 /* DMA read watermark not used on PCIE */
12915 tp->dma_rwctrl |= 0x00180000;
12916 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12918 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012919 tp->dma_rwctrl |= 0x003f0000;
12920 else
12921 tp->dma_rwctrl |= 0x003f000f;
12922 } else {
12923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12925 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080012926 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012927
Michael Chan4a29cc22006-03-19 13:21:12 -080012928 /* If the 5704 is behind the EPB bridge, we can
12929 * do the less restrictive ONE_DMA workaround for
12930 * better performance.
12931 */
12932 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12934 tp->dma_rwctrl |= 0x8000;
12935 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012936 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12937
Michael Chan49afdeb2007-02-13 12:17:03 -080012938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12939 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070012940 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080012941 tp->dma_rwctrl |=
12942 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12943 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12944 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070012945 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12946 /* 5780 always in PCIX mode */
12947 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070012948 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12949 /* 5714 always in PCIX mode */
12950 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012951 } else {
12952 tp->dma_rwctrl |= 0x001b000f;
12953 }
12954 }
12955
12956 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12958 tp->dma_rwctrl &= 0xfffffff0;
12959
12960 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12961 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12962 /* Remove this if it causes problems for some boards. */
12963 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12964
12965 /* On 5700/5701 chips, we need to set this bit.
12966 * Otherwise the chip will issue cacheline transactions
12967 		 * to streamable DMA memory without all of the byte
12968 * enables turned on. This is an error on several
12969 * RISC PCI controllers, in particular sparc64.
12970 *
12971 * On 5703/5704 chips, this bit has been reassigned
12972 * a different meaning. In particular, it is used
12973 * on those chips to enable a PCI-X workaround.
12974 */
12975 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12976 }
12977
12978 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12979
12980#if 0
12981 /* Unneeded, already done by tg3_get_invariants. */
12982 tg3_switch_clocks(tp);
12983#endif
12984
12985 ret = 0;
12986 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12987 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12988 goto out;
12989
David S. Miller59e6b432005-05-18 22:50:10 -070012990 /* It is best to perform DMA test with maximum write burst size
12991 * to expose the 5700/5701 write DMA bug.
12992 */
12993 saved_dma_rwctrl = tp->dma_rwctrl;
12994 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12995 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12996
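	/* Fill the buffer with a known pattern, DMA it to the chip and back,
	 * and compare.  On a miscompare, retry once with the write boundary
	 * forced down to 16 bytes before declaring the DMA engine broken.
	 */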
Linus Torvalds1da177e2005-04-16 15:20:36 -070012997 while (1) {
12998 u32 *p = buf, i;
12999
13000 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13001 p[i] = i;
13002
13003 /* Send the buffer to the chip. */
13004 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13005 if (ret) {
13006 			printk(KERN_ERR "tg3_test_dma() write to device failed, err = %d\n", ret);
13007 break;
13008 }
13009
13010#if 0
13011 /* validate data reached card RAM correctly. */
13012 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13013 u32 val;
13014 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13015 if (le32_to_cpu(val) != p[i]) {
13016 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13017 /* ret = -ENODEV here? */
13018 }
13019 p[i] = 0;
13020 }
13021#endif
13022 /* Now read it back. */
13023 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13024 if (ret) {
13025 			printk(KERN_ERR "tg3_test_dma() read from device failed, err = %d\n", ret);
13026
13027 break;
13028 }
13029
13030 /* Verify it. */
13031 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13032 if (p[i] == i)
13033 continue;
13034
David S. Miller59e6b432005-05-18 22:50:10 -070013035 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13036 DMA_RWCTRL_WRITE_BNDRY_16) {
13037 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013038 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13039 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13040 break;
13041 } else {
13042 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13043 ret = -ENODEV;
13044 goto out;
13045 }
13046 }
13047
13048 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13049 /* Success. */
13050 ret = 0;
13051 break;
13052 }
13053 }
David S. Miller59e6b432005-05-18 22:50:10 -070013054 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13055 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013056 static struct pci_device_id dma_wait_state_chipsets[] = {
13057 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13058 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13059 { },
13060 };
13061
David S. Miller59e6b432005-05-18 22:50:10 -070013062 		/* The DMA test passed without adjusting the DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013063 * now look for chipsets that are known to expose the
13064 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013065 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013066 if (pci_dev_present(dma_wait_state_chipsets)) {
13067 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13068 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13069 }
13070 else
13071 /* Safe to use the calculated DMA boundary. */
13072 tp->dma_rwctrl = saved_dma_rwctrl;
13073
David S. Miller59e6b432005-05-18 22:50:10 -070013074 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13075 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013076
13077out:
13078 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13079out_nofree:
13080 return ret;
13081}
13082
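/* Default link configuration: autonegotiation enabled with every
 * 10/100/1000 mode advertised and no link state cached yet.  The orig_*
 * fields hold the settings to restore when leaving a low-power state.
 */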
13083static void __devinit tg3_init_link_config(struct tg3 *tp)
13084{
13085 tp->link_config.advertising =
13086 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13087 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13088 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13089 ADVERTISED_Autoneg | ADVERTISED_MII);
13090 tp->link_config.speed = SPEED_INVALID;
13091 tp->link_config.duplex = DUPLEX_INVALID;
13092 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013093 tp->link_config.active_speed = SPEED_INVALID;
13094 tp->link_config.active_duplex = DUPLEX_INVALID;
13095 tp->link_config.phy_is_low_power = 0;
13096 tp->link_config.orig_speed = SPEED_INVALID;
13097 tp->link_config.orig_duplex = DUPLEX_INVALID;
13098 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13099}
13100
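/* Buffer-manager MBUF watermarks: 5705-class chips (and the 5906 in
 * particular) get lower thresholds than the original 5700-style
 * defaults, presumably reflecting their smaller on-chip buffer memory.
 */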
13101static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13102{
Michael Chanfdfec172005-07-25 12:31:48 -070013103 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13104 tp->bufmgr_config.mbuf_read_dma_low_water =
13105 DEFAULT_MB_RDMA_LOW_WATER_5705;
13106 tp->bufmgr_config.mbuf_mac_rx_low_water =
13107 DEFAULT_MB_MACRX_LOW_WATER_5705;
13108 tp->bufmgr_config.mbuf_high_water =
13109 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13111 tp->bufmgr_config.mbuf_mac_rx_low_water =
13112 DEFAULT_MB_MACRX_LOW_WATER_5906;
13113 tp->bufmgr_config.mbuf_high_water =
13114 DEFAULT_MB_HIGH_WATER_5906;
13115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013116
Michael Chanfdfec172005-07-25 12:31:48 -070013117 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13118 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13119 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13120 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13121 tp->bufmgr_config.mbuf_high_water_jumbo =
13122 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13123 } else {
13124 tp->bufmgr_config.mbuf_read_dma_low_water =
13125 DEFAULT_MB_RDMA_LOW_WATER;
13126 tp->bufmgr_config.mbuf_mac_rx_low_water =
13127 DEFAULT_MB_MACRX_LOW_WATER;
13128 tp->bufmgr_config.mbuf_high_water =
13129 DEFAULT_MB_HIGH_WATER;
13130
13131 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13132 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13133 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13134 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13135 tp->bufmgr_config.mbuf_high_water_jumbo =
13136 DEFAULT_MB_HIGH_WATER_JUMBO;
13137 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013138
13139 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13140 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13141}
13142
13143static char * __devinit tg3_phy_string(struct tg3 *tp)
13144{
13145 switch (tp->phy_id & PHY_ID_MASK) {
13146 case PHY_ID_BCM5400: return "5400";
13147 case PHY_ID_BCM5401: return "5401";
13148 case PHY_ID_BCM5411: return "5411";
13149 case PHY_ID_BCM5701: return "5701";
13150 case PHY_ID_BCM5703: return "5703";
13151 case PHY_ID_BCM5704: return "5704";
13152 case PHY_ID_BCM5705: return "5705";
13153 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013154 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013155 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013156 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013157 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013158 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013159 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013160 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013161 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013162 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013163 case PHY_ID_BCM8002: return "8002/serdes";
13164 case 0: return "serdes";
13165 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013167}
13168
Michael Chanf9804dd2005-09-27 12:13:10 -070013169static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13170{
13171 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13172 strcpy(str, "PCI Express");
13173 return str;
13174 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13175 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13176
13177 strcpy(str, "PCIX:");
13178
13179 if ((clock_ctrl == 7) ||
13180 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13181 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13182 strcat(str, "133MHz");
13183 else if (clock_ctrl == 0)
13184 strcat(str, "33MHz");
13185 else if (clock_ctrl == 2)
13186 strcat(str, "50MHz");
13187 else if (clock_ctrl == 4)
13188 strcat(str, "66MHz");
13189 else if (clock_ctrl == 6)
13190 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013191 } else {
13192 strcpy(str, "PCI:");
13193 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13194 strcat(str, "66MHz");
13195 else
13196 strcat(str, "33MHz");
13197 }
13198 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13199 strcat(str, ":32-bit");
13200 else
13201 strcat(str, ":64-bit");
13202 return str;
13203}
13204
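/* The 5704 exposes its two ports as separate PCI functions in the same
 * slot; find the companion function so other parts of the driver can
 * coordinate with it.  The reference count is dropped right away since
 * one half of the device cannot be removed without the other.
 */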
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013205static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013206{
13207 struct pci_dev *peer;
13208 unsigned int func, devnr = tp->pdev->devfn & ~7;
13209
13210 for (func = 0; func < 8; func++) {
13211 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13212 if (peer && peer != tp->pdev)
13213 break;
13214 pci_dev_put(peer);
13215 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013216 	/* The 5704 can be configured in single-port mode; set peer to
13217 * tp->pdev in that case.
13218 */
13219 if (!peer) {
13220 peer = tp->pdev;
13221 return peer;
13222 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013223
13224 /*
13225 * We don't need to keep the refcount elevated; there's no way
13226 	 * to remove one half of this device without removing the other.
13227 */
13228 pci_dev_put(peer);
13229
13230 return peer;
13231}
13232
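/* Default interrupt coalescing parameters: the conservative "low"
 * tick/frame settings, switching to the CLRTICKS variants when the
 * HOSTCC CLRTICK_RXBD/TXBD modes are in use, and disabling IRQ and
 * statistics coalescing entirely on 5705-class chips.
 */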
David S. Miller15f98502005-05-18 22:49:26 -070013233static void __devinit tg3_init_coal(struct tg3 *tp)
13234{
13235 struct ethtool_coalesce *ec = &tp->coal;
13236
13237 memset(ec, 0, sizeof(*ec));
13238 ec->cmd = ETHTOOL_GCOALESCE;
13239 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13240 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13241 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13242 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13243 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13244 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13245 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13246 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13247 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13248
13249 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13250 HOSTCC_MODE_CLRTICK_TXBD)) {
13251 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13252 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13253 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13254 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13255 }
Michael Chand244c892005-07-05 14:42:33 -070013256
13257 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13258 ec->rx_coalesce_usecs_irq = 0;
13259 ec->tx_coalesce_usecs_irq = 0;
13260 ec->stats_block_coalesce_usecs = 0;
13261 }
David S. Miller15f98502005-05-18 22:49:26 -070013262}
13263
Linus Torvalds1da177e2005-04-16 15:20:36 -070013264static int __devinit tg3_init_one(struct pci_dev *pdev,
13265 const struct pci_device_id *ent)
13266{
13267 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013268 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013269 struct net_device *dev;
13270 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013271 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013272 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013273 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013274
13275 if (tg3_version_printed++ == 0)
13276 printk(KERN_INFO "%s", version);
13277
13278 err = pci_enable_device(pdev);
13279 if (err) {
13280 printk(KERN_ERR PFX "Cannot enable PCI device, "
13281 "aborting.\n");
13282 return err;
13283 }
13284
Matt Carlson63532392008-11-03 16:49:57 -080013285 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013286 printk(KERN_ERR PFX "Cannot find proper PCI device "
13287 "base address, aborting.\n");
13288 err = -ENODEV;
13289 goto err_out_disable_pdev;
13290 }
13291
13292 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13293 if (err) {
13294 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13295 "aborting.\n");
13296 goto err_out_disable_pdev;
13297 }
13298
13299 pci_set_master(pdev);
13300
13301 /* Find power-management capability. */
13302 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13303 if (pm_cap == 0) {
13304 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13305 "aborting.\n");
13306 err = -EIO;
13307 goto err_out_free_res;
13308 }
13309
Linus Torvalds1da177e2005-04-16 15:20:36 -070013310 dev = alloc_etherdev(sizeof(*tp));
13311 if (!dev) {
13312 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13313 err = -ENOMEM;
13314 goto err_out_free_res;
13315 }
13316
Linus Torvalds1da177e2005-04-16 15:20:36 -070013317 SET_NETDEV_DEV(dev, &pdev->dev);
13318
Linus Torvalds1da177e2005-04-16 15:20:36 -070013319#if TG3_VLAN_TAG_USED
13320 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13321 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013322#endif
13323
13324 tp = netdev_priv(dev);
13325 tp->pdev = pdev;
13326 tp->dev = dev;
13327 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013328 tp->rx_mode = TG3_DEF_RX_MODE;
13329 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013330
Linus Torvalds1da177e2005-04-16 15:20:36 -070013331 if (tg3_debug > 0)
13332 tp->msg_enable = tg3_debug;
13333 else
13334 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13335
13336 /* The word/byte swap controls here control register access byte
13337 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13338 * setting below.
13339 */
13340 tp->misc_host_ctrl =
13341 MISC_HOST_CTRL_MASK_PCI_INT |
13342 MISC_HOST_CTRL_WORD_SWAP |
13343 MISC_HOST_CTRL_INDIR_ACCESS |
13344 MISC_HOST_CTRL_PCISTATE_RW;
13345
13346 /* The NONFRM (non-frame) byte/word swap controls take effect
13347 * on descriptor entries, anything which isn't packet data.
13348 *
13349 * The StrongARM chips on the board (one for tx, one for rx)
13350 * are running in big-endian mode.
13351 */
13352 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13353 GRC_MODE_WSWAP_NONFRM_DATA);
13354#ifdef __BIG_ENDIAN
13355 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13356#endif
13357 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013358 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013359 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013360
Matt Carlson63532392008-11-03 16:49:57 -080013361 dev->mem_start = pci_resource_start(pdev, BAR_0);
13362 tg3reg_len = pci_resource_len(pdev, BAR_0);
13363 dev->mem_end = dev->mem_start + tg3reg_len;
13364
13365 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013366 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013367 printk(KERN_ERR PFX "Cannot map device registers, "
13368 "aborting.\n");
13369 err = -ENOMEM;
13370 goto err_out_free_dev;
13371 }
13372
13373 tg3_init_link_config(tp);
13374
Linus Torvalds1da177e2005-04-16 15:20:36 -070013375 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13376 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13377 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13378
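	/* Hook up the net_device operations; NAPI uses a single context with
	 * a poll weight of 64 packets.
	 */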
13379 dev->open = tg3_open;
13380 dev->stop = tg3_close;
13381 dev->get_stats = tg3_get_stats;
13382 dev->set_multicast_list = tg3_set_rx_mode;
13383 dev->set_mac_address = tg3_set_mac_addr;
13384 dev->do_ioctl = tg3_ioctl;
13385 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013386 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013387 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013388 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13389 dev->change_mtu = tg3_change_mtu;
13390 dev->irq = pdev->irq;
13391#ifdef CONFIG_NET_POLL_CONTROLLER
13392 dev->poll_controller = tg3_poll_controller;
13393#endif
13394
13395 err = tg3_get_invariants(tp);
13396 if (err) {
13397 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13398 "aborting.\n");
13399 goto err_out_iounmap;
13400 }
13401
Michael Chan4a29cc22006-03-19 13:21:12 -080013402 	/* The EPB bridge inside the 5714, 5715, and 5780, and any device
13403 	 * behind the EPB, cannot support DMA addresses wider than 40 bits.
Michael Chan72f2afb2006-03-06 19:28:35 -080013404 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13405 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13406 * do DMA address check in tg3_start_xmit().
13407 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013408 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13409 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13410 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013411 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13412#ifdef CONFIG_HIGHMEM
13413 dma_mask = DMA_64BIT_MASK;
13414#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013415 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013416 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13417
13418 /* Configure DMA attributes. */
13419 if (dma_mask > DMA_32BIT_MASK) {
13420 err = pci_set_dma_mask(pdev, dma_mask);
13421 if (!err) {
13422 dev->features |= NETIF_F_HIGHDMA;
13423 err = pci_set_consistent_dma_mask(pdev,
13424 persist_dma_mask);
13425 if (err < 0) {
13426 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13427 "DMA for consistent allocations\n");
13428 goto err_out_iounmap;
13429 }
13430 }
13431 }
13432 if (err || dma_mask == DMA_32BIT_MASK) {
13433 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13434 if (err) {
13435 printk(KERN_ERR PFX "No usable DMA configuration, "
13436 "aborting.\n");
13437 goto err_out_iounmap;
13438 }
13439 }
13440
Michael Chanfdfec172005-07-25 12:31:48 -070013441 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013442
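	/* Decide whether TSO can be offered at all.  Chips with hardware TSO
	 * engines are always capable; firmware TSO is withheld on 5700/5701,
	 * 5705 A0, 5906, and whenever ASF firmware is active, and is marked
	 * TSO_BUG on the remaining firmware-TSO chips.
	 */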
Linus Torvalds1da177e2005-04-16 15:20:36 -070013443 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13444 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13445 }
13446 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13448 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013450 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13451 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13452 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013453 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013454 }
13455
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013456 /* TSO is on by default on chips that support hardware TSO.
13457 * Firmware TSO on older chips gives lower performance, so it
13458 * is off by default, but can be enabled using ethtool.
13459 */
Michael Chanb0026622006-07-03 19:42:14 -070013460 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013461 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013462 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13463 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013464 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13466 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13467 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13468 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013469 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013471
Linus Torvalds1da177e2005-04-16 15:20:36 -070013472
13473 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13474 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13475 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13476 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13477 tp->rx_pending = 63;
13478 }
13479
Linus Torvalds1da177e2005-04-16 15:20:36 -070013480 err = tg3_get_device_address(tp);
13481 if (err) {
13482 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13483 "aborting.\n");
13484 goto err_out_iounmap;
13485 }
13486
Matt Carlson0d3031d2007-10-10 18:02:43 -070013487 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013488 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013489 printk(KERN_ERR PFX "Cannot find proper PCI device "
13490 "base address for APE, aborting.\n");
13491 err = -ENODEV;
13492 goto err_out_iounmap;
13493 }
13494
Matt Carlson63532392008-11-03 16:49:57 -080013495 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013496 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013497 printk(KERN_ERR PFX "Cannot map APE registers, "
13498 "aborting.\n");
13499 err = -ENOMEM;
13500 goto err_out_iounmap;
13501 }
13502
13503 tg3_ape_lock_init(tp);
13504 }
13505
Matt Carlsonc88864d2007-11-12 21:07:01 -080013506 /*
13507 	 * Reset the chip in case the UNDI or EFI boot driver did not shut it
13508 	 * down.  The DMA self test will enable WDMAC and we'll see (spurious)
13509 	 * pending DMA on the PCI bus at that point.
13510 */
13511 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13512 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13513 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13514 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13515 }
13516
13517 err = tg3_test_dma(tp);
13518 if (err) {
13519 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13520 goto err_out_apeunmap;
13521 }
13522
13523 /* Tigon3 can do ipv4 only... and some chips have buggy
13524 * checksumming.
13525 */
13526 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13527 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13528 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13529 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013533 dev->features |= NETIF_F_IPV6_CSUM;
13534
13535 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13536 } else
13537 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13538
13539 /* flow control autonegotiation is default behavior */
13540 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013541 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013542
13543 tg3_init_coal(tp);
13544
Michael Chanc49a1562006-12-17 17:07:29 -080013545 pci_set_drvdata(pdev, dev);
13546
Linus Torvalds1da177e2005-04-16 15:20:36 -070013547 err = register_netdev(dev);
13548 if (err) {
13549 printk(KERN_ERR PFX "Cannot register net device, "
13550 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013551 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013552 }
13553
Joe Perchesd6645372007-12-20 04:06:59 -080013554 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
Johannes Berge1749612008-10-27 15:59:26 -070013555 "(%s) %s Ethernet %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013556 dev->name,
13557 tp->board_part_number,
13558 tp->pci_chip_rev_id,
13559 tg3_phy_string(tp),
Michael Chanf9804dd2005-09-27 12:13:10 -070013560 tg3_bus_string(tp, str),
Michael Chancbb45d22006-12-07 00:24:09 -080013561 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13562 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
Joe Perchesd6645372007-12-20 04:06:59 -080013563 "10/100/1000Base-T")),
Johannes Berge1749612008-10-27 15:59:26 -070013564 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013565
13566 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
Michael Chan1c46ae02007-03-24 20:54:37 -070013567 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013568 dev->name,
13569 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13570 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13571 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13572 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013573 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13574 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013575 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13576 dev->name, tp->dma_rwctrl,
13577 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13578 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013579
13580 return 0;
13581
Matt Carlson0d3031d2007-10-10 18:02:43 -070013582err_out_apeunmap:
13583 if (tp->aperegs) {
13584 iounmap(tp->aperegs);
13585 tp->aperegs = NULL;
13586 }
13587
Linus Torvalds1da177e2005-04-16 15:20:36 -070013588err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013589 if (tp->regs) {
13590 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013591 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013593
13594err_out_free_dev:
13595 free_netdev(dev);
13596
13597err_out_free_res:
13598 pci_release_regions(pdev);
13599
13600err_out_disable_pdev:
13601 pci_disable_device(pdev);
13602 pci_set_drvdata(pdev, NULL);
13603 return err;
13604}
13605
13606static void __devexit tg3_remove_one(struct pci_dev *pdev)
13607{
13608 struct net_device *dev = pci_get_drvdata(pdev);
13609
13610 if (dev) {
13611 struct tg3 *tp = netdev_priv(dev);
13612
Michael Chan7faa0062006-02-02 17:29:28 -080013613 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013614
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013615 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13616 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013617 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013618 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013619
Linus Torvalds1da177e2005-04-16 15:20:36 -070013620 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013621 if (tp->aperegs) {
13622 iounmap(tp->aperegs);
13623 tp->aperegs = NULL;
13624 }
Michael Chan68929142005-08-09 20:17:14 -070013625 if (tp->regs) {
13626 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013627 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013629 free_netdev(dev);
13630 pci_release_regions(pdev);
13631 pci_disable_device(pdev);
13632 pci_set_drvdata(pdev, NULL);
13633 }
13634}
13635
13636static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13637{
13638 struct net_device *dev = pci_get_drvdata(pdev);
13639 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013640 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013641 int err;
13642
Michael Chan3e0c95f2007-08-03 20:56:54 -070013643 /* PCI register 4 needs to be saved whether netif_running() or not.
13644 * MSI address and data need to be saved if using MSI and
13645 * netif_running().
13646 */
13647 pci_save_state(pdev);
13648
Linus Torvalds1da177e2005-04-16 15:20:36 -070013649 if (!netif_running(dev))
13650 return 0;
13651
Michael Chan7faa0062006-02-02 17:29:28 -080013652 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013653 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013654 tg3_netif_stop(tp);
13655
13656 del_timer_sync(&tp->timer);
13657
David S. Millerf47c11e2005-06-24 20:18:35 -070013658 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013659 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013660 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013661
13662 netif_device_detach(dev);
13663
David S. Millerf47c11e2005-06-24 20:18:35 -070013664 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013665 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013666 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013667 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013668
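	/* Pick the sleep state: let the PCI core choose the target when a PM
	 * capability is present, otherwise fall back to D3hot.
	 */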
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013669 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13670
13671 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013672 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013673 int err2;
13674
David S. Millerf47c11e2005-06-24 20:18:35 -070013675 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013676
Michael Chan6a9eba12005-12-13 21:08:58 -080013677 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013678 err2 = tg3_restart_hw(tp, 1);
13679 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013680 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013681
13682 tp->timer.expires = jiffies + tp->timer_offset;
13683 add_timer(&tp->timer);
13684
13685 netif_device_attach(dev);
13686 tg3_netif_start(tp);
13687
Michael Chanb9ec6c12006-07-25 16:37:27 -070013688out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013689 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013690
13691 if (!err2)
13692 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013693 }
13694
13695 return err;
13696}
13697
13698static int tg3_resume(struct pci_dev *pdev)
13699{
13700 struct net_device *dev = pci_get_drvdata(pdev);
13701 struct tg3 *tp = netdev_priv(dev);
13702 int err;
13703
Michael Chan3e0c95f2007-08-03 20:56:54 -070013704 pci_restore_state(tp->pdev);
13705
Linus Torvalds1da177e2005-04-16 15:20:36 -070013706 if (!netif_running(dev))
13707 return 0;
13708
Michael Chanbc1c7562006-03-20 17:48:03 -080013709 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013710 if (err)
13711 return err;
13712
13713 netif_device_attach(dev);
13714
David S. Millerf47c11e2005-06-24 20:18:35 -070013715 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013716
Michael Chan6a9eba12005-12-13 21:08:58 -080013717 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013718 err = tg3_restart_hw(tp, 1);
13719 if (err)
13720 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013721
13722 tp->timer.expires = jiffies + tp->timer_offset;
13723 add_timer(&tp->timer);
13724
Linus Torvalds1da177e2005-04-16 15:20:36 -070013725 tg3_netif_start(tp);
13726
Michael Chanb9ec6c12006-07-25 16:37:27 -070013727out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013728 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013729
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013730 if (!err)
13731 tg3_phy_start(tp);
13732
Michael Chanb9ec6c12006-07-25 16:37:27 -070013733 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013734}
13735
13736static struct pci_driver tg3_driver = {
13737 .name = DRV_MODULE_NAME,
13738 .id_table = tg3_pci_tbl,
13739 .probe = tg3_init_one,
13740 .remove = __devexit_p(tg3_remove_one),
13741 .suspend = tg3_suspend,
13742 .resume = tg3_resume
13743};
13744
13745static int __init tg3_init(void)
13746{
Jeff Garzik29917622006-08-19 17:48:59 -040013747 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013748}
13749
13750static void __exit tg3_cleanup(void)
13751{
13752 pci_unregister_driver(&tg3_driver);
13753}
13754
13755module_init(tg3_init);
13756module_exit(tg3_cleanup);