blob: e64721b185d1b0f18782854187bb5e44603a8c1b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
Michael Chan65610fb2007-02-13 12:18:46 -08007 * Copyright (C) 2005-2007 Broadcom Corporation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020026#include <linux/in.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
Matt Carlson158d7ab2008-05-29 01:37:54 -070035#include <linux/phy.h>
Matt Carlsona9daf362008-05-25 23:49:44 -070036#include <linux/brcmphy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/if_vlan.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/workqueue.h>
Michael Chan61487482005-09-05 17:53:19 -070041#include <linux/prefetch.h>
Tobias Klauserf9a5f7d2005-10-29 15:09:26 +020042#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/checksum.h>
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -030045#include <net/ip.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#include <asm/system.h>
48#include <asm/io.h>
49#include <asm/byteorder.h>
50#include <asm/uaccess.h>
51
David S. Miller49b6e95f2007-03-29 01:38:42 -070052#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/idprom.h>
David S. Miller49b6e95f2007-03-29 01:38:42 -070054#include <asm/prom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#endif
56
Matt Carlson63532392008-11-03 16:49:57 -080057#define BAR_0 0
58#define BAR_2 2
59
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61#define TG3_VLAN_TAG_USED 1
62#else
63#define TG3_VLAN_TAG_USED 0
64#endif
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#define TG3_TSO_SUPPORT 1
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
Matt Carlson23197912008-08-15 14:11:19 -070072#define DRV_MODULE_VERSION "3.94"
73#define DRV_MODULE_RELDATE "August 14, 2008"
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
Michael Chan0f893dc2005-07-25 12:30:38 -070096 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131
132/* minimum number of free TX descriptors required to wake up TX process */
Ranjit Manomohan42952232006-10-18 20:54:26 -0700133#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
135/* number of ETHTOOL_GSTATS u64's */
136#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
Michael Chan4cafd3f2005-05-29 14:56:34 -0700138#define TG3_NUM_TEST 6
139
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140static char version[] __devinitdata =
141 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145MODULE_LICENSE("GPL");
146MODULE_VERSION(DRV_MODULE_VERSION);
147
148static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
149module_param(tg3_debug, int, 0);
150MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152static struct pci_device_id tg3_pci_tbl[] = {
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
Michael Chan126a3362006-09-27 16:03:07 -0700177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
Michael Chan126a3362006-09-27 16:03:07 -0700192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
Michael Chan676917d2006-12-07 00:20:22 -0800196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
Michael Chanb5d37722006-09-27 16:06:21 -0700204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
Matt Carlsond30cdd22007-10-07 23:28:35 -0700206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
Matt Carlson6c7af272007-10-21 16:12:02 -0700208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
Matt Carlson9936bcf2007-10-10 18:03:07 -0700209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
Matt Carlsonc88e6682008-11-03 16:49:18 -0800211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
Matt Carlson57e69832008-05-25 23:48:31 -0700213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700214 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221 {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222};
223
224MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
Andreas Mohr50da8592006-08-14 23:54:30 -0700226static const struct {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 const char string[ETH_GSTRING_LEN];
228} ethtool_stats_keys[TG3_NUM_STATS] = {
229 { "rx_octets" },
230 { "rx_fragments" },
231 { "rx_ucast_packets" },
232 { "rx_mcast_packets" },
233 { "rx_bcast_packets" },
234 { "rx_fcs_errors" },
235 { "rx_align_errors" },
236 { "rx_xon_pause_rcvd" },
237 { "rx_xoff_pause_rcvd" },
238 { "rx_mac_ctrl_rcvd" },
239 { "rx_xoff_entered" },
240 { "rx_frame_too_long_errors" },
241 { "rx_jabbers" },
242 { "rx_undersize_packets" },
243 { "rx_in_length_errors" },
244 { "rx_out_length_errors" },
245 { "rx_64_or_less_octet_packets" },
246 { "rx_65_to_127_octet_packets" },
247 { "rx_128_to_255_octet_packets" },
248 { "rx_256_to_511_octet_packets" },
249 { "rx_512_to_1023_octet_packets" },
250 { "rx_1024_to_1522_octet_packets" },
251 { "rx_1523_to_2047_octet_packets" },
252 { "rx_2048_to_4095_octet_packets" },
253 { "rx_4096_to_8191_octet_packets" },
254 { "rx_8192_to_9022_octet_packets" },
255
256 { "tx_octets" },
257 { "tx_collisions" },
258
259 { "tx_xon_sent" },
260 { "tx_xoff_sent" },
261 { "tx_flow_control" },
262 { "tx_mac_errors" },
263 { "tx_single_collisions" },
264 { "tx_mult_collisions" },
265 { "tx_deferred" },
266 { "tx_excessive_collisions" },
267 { "tx_late_collisions" },
268 { "tx_collide_2times" },
269 { "tx_collide_3times" },
270 { "tx_collide_4times" },
271 { "tx_collide_5times" },
272 { "tx_collide_6times" },
273 { "tx_collide_7times" },
274 { "tx_collide_8times" },
275 { "tx_collide_9times" },
276 { "tx_collide_10times" },
277 { "tx_collide_11times" },
278 { "tx_collide_12times" },
279 { "tx_collide_13times" },
280 { "tx_collide_14times" },
281 { "tx_collide_15times" },
282 { "tx_ucast_packets" },
283 { "tx_mcast_packets" },
284 { "tx_bcast_packets" },
285 { "tx_carrier_sense_errors" },
286 { "tx_discards" },
287 { "tx_errors" },
288
289 { "dma_writeq_full" },
290 { "dma_write_prioq_full" },
291 { "rxbds_empty" },
292 { "rx_discards" },
293 { "rx_errors" },
294 { "rx_threshold_hit" },
295
296 { "dma_readq_full" },
297 { "dma_read_prioq_full" },
298 { "tx_comp_queue_full" },
299
300 { "ring_set_send_prod_index" },
301 { "ring_status_update" },
302 { "nic_irqs" },
303 { "nic_avoided_irqs" },
304 { "nic_tx_threshold_hit" }
305};
306
Andreas Mohr50da8592006-08-14 23:54:30 -0700307static const struct {
Michael Chan4cafd3f2005-05-29 14:56:34 -0700308 const char string[ETH_GSTRING_LEN];
309} ethtool_test_keys[TG3_NUM_TEST] = {
310 { "nvram test (online) " },
311 { "link test (online) " },
312 { "register test (offline)" },
313 { "memory test (offline)" },
314 { "loopback test (offline)" },
315 { "interrupt test (offline)" },
316};
317
Michael Chanb401e9e2005-12-19 16:27:04 -0800318static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
319{
320 writel(val, tp->regs + off);
321}
322
323static u32 tg3_read32(struct tg3 *tp, u32 off)
324{
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400325 return (readl(tp->regs + off));
Michael Chanb401e9e2005-12-19 16:27:04 -0800326}
327
Matt Carlson0d3031d2007-10-10 18:02:43 -0700328static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
329{
330 writel(val, tp->aperegs + off);
331}
332
333static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334{
335 return (readl(tp->aperegs + off));
336}
337
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339{
Michael Chan68929142005-08-09 20:17:14 -0700340 unsigned long flags;
341
342 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700343 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
Michael Chan68929142005-08-09 20:17:14 -0700345 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700346}
347
348static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
349{
350 writel(val, tp->regs + off);
351 readl(tp->regs + off);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352}
353
Michael Chan68929142005-08-09 20:17:14 -0700354static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
355{
356 unsigned long flags;
357 u32 val;
358
359 spin_lock_irqsave(&tp->indirect_lock, flags);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
362 spin_unlock_irqrestore(&tp->indirect_lock, flags);
363 return val;
364}
365
366static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
367{
368 unsigned long flags;
369
370 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
371 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
372 TG3_64BIT_REG_LOW, val);
373 return;
374 }
375 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
376 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
377 TG3_64BIT_REG_LOW, val);
378 return;
379 }
380
381 spin_lock_irqsave(&tp->indirect_lock, flags);
382 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
384 spin_unlock_irqrestore(&tp->indirect_lock, flags);
385
386 /* In indirect mode when disabling interrupts, we also need
387 * to clear the interrupt bit in the GRC local ctrl register.
388 */
389 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
390 (val == 0x1)) {
391 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
392 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
393 }
394}
395
396static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
397{
398 unsigned long flags;
399 u32 val;
400
401 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
403 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
404 spin_unlock_irqrestore(&tp->indirect_lock, flags);
405 return val;
406}
407
Michael Chanb401e9e2005-12-19 16:27:04 -0800408/* usec_wait specifies the wait time in usec when writing to certain registers
409 * where it is unsafe to read back the register without some delay.
410 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
411 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
412 */
413static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414{
Michael Chanb401e9e2005-12-19 16:27:04 -0800415 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
416 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
417 /* Non-posted methods */
418 tp->write32(tp, off, val);
419 else {
420 /* Posted method */
421 tg3_write32(tp, off, val);
422 if (usec_wait)
423 udelay(usec_wait);
424 tp->read32(tp, off);
425 }
426 /* Wait again after the read for the posted method to guarantee that
427 * the wait time is met.
428 */
429 if (usec_wait)
430 udelay(usec_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431}
432
Michael Chan09ee9292005-08-09 20:17:00 -0700433static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
434{
435 tp->write32_mbox(tp, off, val);
Michael Chan68929142005-08-09 20:17:14 -0700436 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
437 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438 tp->read32_mbox(tp, off);
Michael Chan09ee9292005-08-09 20:17:00 -0700439}
440
Michael Chan20094932005-08-09 20:16:32 -0700441static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442{
443 void __iomem *mbox = tp->regs + off;
444 writel(val, mbox);
445 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
446 writel(val, mbox);
447 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
448 readl(mbox);
449}
450
Michael Chanb5d37722006-09-27 16:06:21 -0700451static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452{
453 return (readl(tp->regs + off + GRCMBOX_BASE));
454}
455
456static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
457{
458 writel(val, tp->regs + off + GRCMBOX_BASE);
459}
460
Michael Chan20094932005-08-09 20:16:32 -0700461#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700462#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
Michael Chan20094932005-08-09 20:16:32 -0700463#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
464#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700465#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
Michael Chan20094932005-08-09 20:16:32 -0700466
467#define tw32(reg,val) tp->write32(tp, reg, val)
Michael Chanb401e9e2005-12-19 16:27:04 -0800468#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
469#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
Michael Chan20094932005-08-09 20:16:32 -0700470#define tr32(reg) tp->read32(tp, reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
472static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
473{
Michael Chan68929142005-08-09 20:17:14 -0700474 unsigned long flags;
475
Michael Chanb5d37722006-09-27 16:06:21 -0700476 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
477 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
478 return;
479
Michael Chan68929142005-08-09 20:17:14 -0700480 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chanbbadf502006-04-06 21:46:34 -0700481 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
482 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
483 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484
Michael Chanbbadf502006-04-06 21:46:34 -0700485 /* Always leave this as zero. */
486 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
487 } else {
488 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
489 tw32_f(TG3PCI_MEM_WIN_DATA, val);
490
491 /* Always leave this as zero. */
492 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
493 }
Michael Chan68929142005-08-09 20:17:14 -0700494 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495}
496
497static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
498{
Michael Chan68929142005-08-09 20:17:14 -0700499 unsigned long flags;
500
Michael Chanb5d37722006-09-27 16:06:21 -0700501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
502 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
503 *val = 0;
504 return;
505 }
506
Michael Chan68929142005-08-09 20:17:14 -0700507 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chanbbadf502006-04-06 21:46:34 -0700508 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
509 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
510 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511
Michael Chanbbadf502006-04-06 21:46:34 -0700512 /* Always leave this as zero. */
513 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
514 } else {
515 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
516 *val = tr32(TG3PCI_MEM_WIN_DATA);
517
518 /* Always leave this as zero. */
519 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
520 }
Michael Chan68929142005-08-09 20:17:14 -0700521 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522}
523
Matt Carlson0d3031d2007-10-10 18:02:43 -0700524static void tg3_ape_lock_init(struct tg3 *tp)
525{
526 int i;
527
528 /* Make sure the driver hasn't any stale locks. */
529 for (i = 0; i < 8; i++)
530 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531 APE_LOCK_GRANT_DRIVER);
532}
533
534static int tg3_ape_lock(struct tg3 *tp, int locknum)
535{
536 int i, off;
537 int ret = 0;
538 u32 status;
539
540 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541 return 0;
542
543 switch (locknum) {
Matt Carlson77b483f2008-08-15 14:07:24 -0700544 case TG3_APE_LOCK_GRC:
Matt Carlson0d3031d2007-10-10 18:02:43 -0700545 case TG3_APE_LOCK_MEM:
546 break;
547 default:
548 return -EINVAL;
549 }
550
551 off = 4 * locknum;
552
553 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555 /* Wait for up to 1 millisecond to acquire lock. */
556 for (i = 0; i < 100; i++) {
557 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558 if (status == APE_LOCK_GRANT_DRIVER)
559 break;
560 udelay(10);
561 }
562
563 if (status != APE_LOCK_GRANT_DRIVER) {
564 /* Revoke the lock request. */
565 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566 APE_LOCK_GRANT_DRIVER);
567
568 ret = -EBUSY;
569 }
570
571 return ret;
572}
573
574static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575{
576 int off;
577
578 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579 return;
580
581 switch (locknum) {
Matt Carlson77b483f2008-08-15 14:07:24 -0700582 case TG3_APE_LOCK_GRC:
Matt Carlson0d3031d2007-10-10 18:02:43 -0700583 case TG3_APE_LOCK_MEM:
584 break;
585 default:
586 return;
587 }
588
589 off = 4 * locknum;
590 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591}
592
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593static void tg3_disable_ints(struct tg3 *tp)
594{
595 tw32(TG3PCI_MISC_HOST_CTRL,
596 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700597 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598}
599
600static inline void tg3_cond_int(struct tg3 *tp)
601{
Michael Chan38f38432005-09-05 17:53:32 -0700602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603 (tp->hw_status->status & SD_STATUS_UPDATED))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
Michael Chanb5d37722006-09-27 16:06:21 -0700605 else
606 tw32(HOSTCC_MODE, tp->coalesce_mode |
607 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608}
609
610static void tg3_enable_ints(struct tg3 *tp)
611{
Michael Chanbbe832c2005-06-24 20:20:04 -0700612 tp->irq_sync = 0;
613 wmb();
614
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 tw32(TG3PCI_MISC_HOST_CTRL,
616 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700617 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
618 (tp->last_tag << 24));
Michael Chanfcfa0a32006-03-20 22:28:41 -0800619 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
620 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
621 (tp->last_tag << 24));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 tg3_cond_int(tp);
623}
624
Michael Chan04237dd2005-04-25 15:17:17 -0700625static inline unsigned int tg3_has_work(struct tg3 *tp)
626{
627 struct tg3_hw_status *sblk = tp->hw_status;
628 unsigned int work_exists = 0;
629
630 /* check for phy events */
631 if (!(tp->tg3_flags &
632 (TG3_FLAG_USE_LINKCHG_REG |
633 TG3_FLAG_POLL_SERDES))) {
634 if (sblk->status & SD_STATUS_LINK_CHG)
635 work_exists = 1;
636 }
637 /* check for RX/TX work to do */
638 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
640 work_exists = 1;
641
642 return work_exists;
643}
644
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645/* tg3_restart_ints
Michael Chan04237dd2005-04-25 15:17:17 -0700646 * similar to tg3_enable_ints, but it accurately determines whether there
647 * is new work pending and can return without flushing the PIO write
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400648 * which reenables interrupts
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 */
650static void tg3_restart_ints(struct tg3 *tp)
651{
David S. Millerfac9b832005-05-18 22:46:34 -0700652 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
653 tp->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 mmiowb();
655
David S. Millerfac9b832005-05-18 22:46:34 -0700656 /* When doing tagged status, this work check is unnecessary.
657 * The last_tag we write above tells the chip which piece of
658 * work we've completed.
659 */
660 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
661 tg3_has_work(tp))
Michael Chan04237dd2005-04-25 15:17:17 -0700662 tw32(HOSTCC_MODE, tp->coalesce_mode |
663 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664}
665
666static inline void tg3_netif_stop(struct tg3 *tp)
667{
Michael Chanbbe832c2005-06-24 20:20:04 -0700668 tp->dev->trans_start = jiffies; /* prevent tx timeout */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700669 napi_disable(&tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 netif_tx_disable(tp->dev);
671}
672
673static inline void tg3_netif_start(struct tg3 *tp)
674{
675 netif_wake_queue(tp->dev);
676 /* NOTE: unconditional netif_wake_queue is only appropriate
677 * so long as all callers are assured to have free tx slots
678 * (such as after tg3_init_hw)
679 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700680 napi_enable(&tp->napi);
David S. Millerf47c11e2005-06-24 20:18:35 -0700681 tp->hw_status->status |= SD_STATUS_UPDATED;
682 tg3_enable_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683}
684
685static void tg3_switch_clocks(struct tg3 *tp)
686{
687 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
688 u32 orig_clock_ctrl;
689
Matt Carlson795d01c2007-10-07 23:28:17 -0700690 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
691 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan4cf78e42005-07-25 12:29:19 -0700692 return;
693
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 orig_clock_ctrl = clock_ctrl;
695 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
696 CLOCK_CTRL_CLKRUN_OENABLE |
697 0x1f);
698 tp->pci_clock_ctrl = clock_ctrl;
699
700 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
701 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
Michael Chanb401e9e2005-12-19 16:27:04 -0800702 tw32_wait_f(TG3PCI_CLOCK_CTRL,
703 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704 }
705 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
Michael Chanb401e9e2005-12-19 16:27:04 -0800706 tw32_wait_f(TG3PCI_CLOCK_CTRL,
707 clock_ctrl |
708 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
709 40);
710 tw32_wait_f(TG3PCI_CLOCK_CTRL,
711 clock_ctrl | (CLOCK_CTRL_ALTCLK),
712 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 }
Michael Chanb401e9e2005-12-19 16:27:04 -0800714 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715}
716
717#define PHY_BUSY_LOOPS 5000
718
719static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
720{
721 u32 frame_val;
722 unsigned int loops;
723 int ret;
724
725 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
726 tw32_f(MAC_MI_MODE,
727 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
728 udelay(80);
729 }
730
731 *val = 0x0;
732
733 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
734 MI_COM_PHY_ADDR_MASK);
735 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
736 MI_COM_REG_ADDR_MASK);
737 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400738
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 tw32_f(MAC_MI_COM, frame_val);
740
741 loops = PHY_BUSY_LOOPS;
742 while (loops != 0) {
743 udelay(10);
744 frame_val = tr32(MAC_MI_COM);
745
746 if ((frame_val & MI_COM_BUSY) == 0) {
747 udelay(5);
748 frame_val = tr32(MAC_MI_COM);
749 break;
750 }
751 loops -= 1;
752 }
753
754 ret = -EBUSY;
755 if (loops != 0) {
756 *val = frame_val & MI_COM_DATA_MASK;
757 ret = 0;
758 }
759
760 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
761 tw32_f(MAC_MI_MODE, tp->mi_mode);
762 udelay(80);
763 }
764
765 return ret;
766}
767
768static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
769{
770 u32 frame_val;
771 unsigned int loops;
772 int ret;
773
Michael Chanb5d37722006-09-27 16:06:21 -0700774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
775 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
776 return 0;
777
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
779 tw32_f(MAC_MI_MODE,
780 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
781 udelay(80);
782 }
783
784 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
785 MI_COM_PHY_ADDR_MASK);
786 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
787 MI_COM_REG_ADDR_MASK);
788 frame_val |= (val & MI_COM_DATA_MASK);
789 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400790
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 tw32_f(MAC_MI_COM, frame_val);
792
793 loops = PHY_BUSY_LOOPS;
794 while (loops != 0) {
795 udelay(10);
796 frame_val = tr32(MAC_MI_COM);
797 if ((frame_val & MI_COM_BUSY) == 0) {
798 udelay(5);
799 frame_val = tr32(MAC_MI_COM);
800 break;
801 }
802 loops -= 1;
803 }
804
805 ret = -EBUSY;
806 if (loops != 0)
807 ret = 0;
808
809 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
810 tw32_f(MAC_MI_MODE, tp->mi_mode);
811 udelay(80);
812 }
813
814 return ret;
815}
816
Matt Carlson95e28692008-05-25 23:44:14 -0700817static int tg3_bmcr_reset(struct tg3 *tp)
818{
819 u32 phy_control;
820 int limit, err;
821
822 /* OK, reset it, and poll the BMCR_RESET bit until it
823 * clears or we time out.
824 */
825 phy_control = BMCR_RESET;
826 err = tg3_writephy(tp, MII_BMCR, phy_control);
827 if (err != 0)
828 return -EBUSY;
829
830 limit = 5000;
831 while (limit--) {
832 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833 if (err != 0)
834 return -EBUSY;
835
836 if ((phy_control & BMCR_RESET) == 0) {
837 udelay(40);
838 break;
839 }
840 udelay(10);
841 }
842 if (limit <= 0)
843 return -EBUSY;
844
845 return 0;
846}
847
Matt Carlson158d7ab2008-05-29 01:37:54 -0700848static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849{
850 struct tg3 *tp = (struct tg3 *)bp->priv;
851 u32 val;
852
853 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854 return -EAGAIN;
855
856 if (tg3_readphy(tp, reg, &val))
857 return -EIO;
858
859 return val;
860}
861
862static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863{
864 struct tg3 *tp = (struct tg3 *)bp->priv;
865
866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867 return -EAGAIN;
868
869 if (tg3_writephy(tp, reg, val))
870 return -EIO;
871
872 return 0;
873}
874
875static int tg3_mdio_reset(struct mii_bus *bp)
876{
877 return 0;
878}
879
Matt Carlsona9daf362008-05-25 23:49:44 -0700880static void tg3_mdio_config(struct tg3 *tp)
881{
882 u32 val;
883
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700884 if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
Matt Carlsona9daf362008-05-25 23:49:44 -0700885 PHY_INTERFACE_MODE_RGMII)
886 return;
887
888 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
889 MAC_PHYCFG1_RGMII_SND_STAT_EN);
890 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
891 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
892 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
893 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
894 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
895 }
896 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
897
898 val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
899 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
900 val |= MAC_PHYCFG2_INBAND_ENABLE;
901 tw32(MAC_PHYCFG2, val);
902
903 val = tr32(MAC_EXT_RGMII_MODE);
904 val &= ~(MAC_RGMII_MODE_RX_INT_B |
905 MAC_RGMII_MODE_RX_QUALITY |
906 MAC_RGMII_MODE_RX_ACTIVITY |
907 MAC_RGMII_MODE_RX_ENG_DET |
908 MAC_RGMII_MODE_TX_ENABLE |
909 MAC_RGMII_MODE_TX_LOWPWR |
910 MAC_RGMII_MODE_TX_RESET);
911 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
912 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
913 val |= MAC_RGMII_MODE_RX_INT_B |
914 MAC_RGMII_MODE_RX_QUALITY |
915 MAC_RGMII_MODE_RX_ACTIVITY |
916 MAC_RGMII_MODE_RX_ENG_DET;
917 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
918 val |= MAC_RGMII_MODE_TX_ENABLE |
919 MAC_RGMII_MODE_TX_LOWPWR |
920 MAC_RGMII_MODE_TX_RESET;
921 }
922 tw32(MAC_EXT_RGMII_MODE, val);
923}
924
Matt Carlson158d7ab2008-05-29 01:37:54 -0700925static void tg3_mdio_start(struct tg3 *tp)
926{
927 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700928 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700929 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700930 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700931 }
932
933 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
934 tw32_f(MAC_MI_MODE, tp->mi_mode);
935 udelay(80);
Matt Carlsona9daf362008-05-25 23:49:44 -0700936
937 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
938 tg3_mdio_config(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700939}
940
941static void tg3_mdio_stop(struct tg3 *tp)
942{
943 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700944 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700945 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700946 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700947 }
948}
949
950static int tg3_mdio_init(struct tg3 *tp)
951{
952 int i;
953 u32 reg;
Matt Carlsona9daf362008-05-25 23:49:44 -0700954 struct phy_device *phydev;
Matt Carlson158d7ab2008-05-29 01:37:54 -0700955
956 tg3_mdio_start(tp);
957
958 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
959 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
960 return 0;
961
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700962 tp->mdio_bus = mdiobus_alloc();
963 if (tp->mdio_bus == NULL)
964 return -ENOMEM;
Matt Carlson158d7ab2008-05-29 01:37:54 -0700965
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700966 tp->mdio_bus->name = "tg3 mdio bus";
967 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
Matt Carlson158d7ab2008-05-29 01:37:54 -0700968 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700969 tp->mdio_bus->priv = tp;
970 tp->mdio_bus->parent = &tp->pdev->dev;
971 tp->mdio_bus->read = &tg3_mdio_read;
972 tp->mdio_bus->write = &tg3_mdio_write;
973 tp->mdio_bus->reset = &tg3_mdio_reset;
974 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
975 tp->mdio_bus->irq = &tp->mdio_irq[0];
Matt Carlson158d7ab2008-05-29 01:37:54 -0700976
977 for (i = 0; i < PHY_MAX_ADDR; i++)
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700978 tp->mdio_bus->irq[i] = PHY_POLL;
Matt Carlson158d7ab2008-05-29 01:37:54 -0700979
980 /* The bus registration will look for all the PHYs on the mdio bus.
981 * Unfortunately, it does not ensure the PHY is powered up before
982 * accessing the PHY ID registers. A chip reset is the
983 * quickest way to bring the device back to an operational state..
984 */
985 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
986 tg3_bmcr_reset(tp);
987
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700988 i = mdiobus_register(tp->mdio_bus);
Matt Carlsona9daf362008-05-25 23:49:44 -0700989 if (i) {
Matt Carlson158d7ab2008-05-29 01:37:54 -0700990 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
991 tp->dev->name, i);
Matt Carlsona9daf362008-05-25 23:49:44 -0700992 return i;
993 }
Matt Carlson158d7ab2008-05-29 01:37:54 -0700994
Matt Carlsona9daf362008-05-25 23:49:44 -0700995 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
996
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700997 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsona9daf362008-05-25 23:49:44 -0700998
999 switch (phydev->phy_id) {
1000 case TG3_PHY_ID_BCM50610:
1001 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1002 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1003 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1004 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1005 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1006 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1007 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1008 break;
1009 case TG3_PHY_ID_BCMAC131:
1010 phydev->interface = PHY_INTERFACE_MODE_MII;
1011 break;
1012 }
1013
1014 tg3_mdio_config(tp);
1015
1016 return 0;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001017}
1018
1019static void tg3_mdio_fini(struct tg3 *tp)
1020{
1021 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1022 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001023 mdiobus_unregister(tp->mdio_bus);
1024 mdiobus_free(tp->mdio_bus);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001025 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1026 }
1027}
1028
Matt Carlson95e28692008-05-25 23:44:14 -07001029/* tp->lock is held. */
Matt Carlson4ba526c2008-08-15 14:10:04 -07001030static inline void tg3_generate_fw_event(struct tg3 *tp)
1031{
1032 u32 val;
1033
1034 val = tr32(GRC_RX_CPU_EVENT);
1035 val |= GRC_RX_CPU_DRIVER_EVENT;
1036 tw32_f(GRC_RX_CPU_EVENT, val);
1037
1038 tp->last_event_jiffies = jiffies;
1039}
1040
1041#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1042
1043/* tp->lock is held. */
Matt Carlson95e28692008-05-25 23:44:14 -07001044static void tg3_wait_for_event_ack(struct tg3 *tp)
1045{
1046 int i;
Matt Carlson4ba526c2008-08-15 14:10:04 -07001047 unsigned int delay_cnt;
1048 long time_remain;
Matt Carlson95e28692008-05-25 23:44:14 -07001049
Matt Carlson4ba526c2008-08-15 14:10:04 -07001050 /* If enough time has passed, no wait is necessary. */
1051 time_remain = (long)(tp->last_event_jiffies + 1 +
1052 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1053 (long)jiffies;
1054 if (time_remain < 0)
1055 return;
1056
1057 /* Check if we can shorten the wait time. */
1058 delay_cnt = jiffies_to_usecs(time_remain);
1059 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1060 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1061 delay_cnt = (delay_cnt >> 3) + 1;
1062
1063 for (i = 0; i < delay_cnt; i++) {
Matt Carlson95e28692008-05-25 23:44:14 -07001064 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1065 break;
Matt Carlson4ba526c2008-08-15 14:10:04 -07001066 udelay(8);
Matt Carlson95e28692008-05-25 23:44:14 -07001067 }
1068}
1069
1070/* tp->lock is held. */
1071static void tg3_ump_link_report(struct tg3 *tp)
1072{
1073 u32 reg;
1074 u32 val;
1075
1076 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1077 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1078 return;
1079
1080 tg3_wait_for_event_ack(tp);
1081
1082 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1083
1084 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1085
1086 val = 0;
1087 if (!tg3_readphy(tp, MII_BMCR, &reg))
1088 val = reg << 16;
1089 if (!tg3_readphy(tp, MII_BMSR, &reg))
1090 val |= (reg & 0xffff);
1091 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1092
1093 val = 0;
1094 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1095 val = reg << 16;
1096 if (!tg3_readphy(tp, MII_LPA, &reg))
1097 val |= (reg & 0xffff);
1098 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1099
1100 val = 0;
1101 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1102 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1103 val = reg << 16;
1104 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1105 val |= (reg & 0xffff);
1106 }
1107 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1108
1109 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1110 val = reg << 16;
1111 else
1112 val = 0;
1113 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1114
Matt Carlson4ba526c2008-08-15 14:10:04 -07001115 tg3_generate_fw_event(tp);
Matt Carlson95e28692008-05-25 23:44:14 -07001116}
1117
1118static void tg3_link_report(struct tg3 *tp)
1119{
1120 if (!netif_carrier_ok(tp->dev)) {
1121 if (netif_msg_link(tp))
1122 printk(KERN_INFO PFX "%s: Link is down.\n",
1123 tp->dev->name);
1124 tg3_ump_link_report(tp);
1125 } else if (netif_msg_link(tp)) {
1126 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1127 tp->dev->name,
1128 (tp->link_config.active_speed == SPEED_1000 ?
1129 1000 :
1130 (tp->link_config.active_speed == SPEED_100 ?
1131 100 : 10)),
1132 (tp->link_config.active_duplex == DUPLEX_FULL ?
1133 "full" : "half"));
1134
1135 printk(KERN_INFO PFX
1136 "%s: Flow control is %s for TX and %s for RX.\n",
1137 tp->dev->name,
1138 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1139 "on" : "off",
1140 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1141 "on" : "off");
1142 tg3_ump_link_report(tp);
1143 }
1144}
1145
1146static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1147{
1148 u16 miireg;
1149
1150 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1151 miireg = ADVERTISE_PAUSE_CAP;
1152 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1153 miireg = ADVERTISE_PAUSE_ASYM;
1154 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1155 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1156 else
1157 miireg = 0;
1158
1159 return miireg;
1160}
1161
1162static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1163{
1164 u16 miireg;
1165
1166 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1167 miireg = ADVERTISE_1000XPAUSE;
1168 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1169 miireg = ADVERTISE_1000XPSE_ASYM;
1170 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1171 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1172 else
1173 miireg = 0;
1174
1175 return miireg;
1176}
1177
1178static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1179{
1180 u8 cap = 0;
1181
1182 if (lcladv & ADVERTISE_PAUSE_CAP) {
1183 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1184 if (rmtadv & LPA_PAUSE_CAP)
1185 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1186 else if (rmtadv & LPA_PAUSE_ASYM)
1187 cap = TG3_FLOW_CTRL_RX;
1188 } else {
1189 if (rmtadv & LPA_PAUSE_CAP)
1190 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1191 }
1192 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1193 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1194 cap = TG3_FLOW_CTRL_TX;
1195 }
1196
1197 return cap;
1198}
1199
1200static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1201{
1202 u8 cap = 0;
1203
1204 if (lcladv & ADVERTISE_1000XPAUSE) {
1205 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1206 if (rmtadv & LPA_1000XPAUSE)
1207 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1208 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1209 cap = TG3_FLOW_CTRL_RX;
1210 } else {
1211 if (rmtadv & LPA_1000XPAUSE)
1212 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1213 }
1214 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1215 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1216 cap = TG3_FLOW_CTRL_TX;
1217 }
1218
1219 return cap;
1220}
1221
Matt Carlsonf51f3562008-05-25 23:45:08 -07001222static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
Matt Carlson95e28692008-05-25 23:44:14 -07001223{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001224 u8 autoneg;
Matt Carlsonf51f3562008-05-25 23:45:08 -07001225 u8 flowctrl = 0;
Matt Carlson95e28692008-05-25 23:44:14 -07001226 u32 old_rx_mode = tp->rx_mode;
1227 u32 old_tx_mode = tp->tx_mode;
1228
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001229 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001230 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001231 else
1232 autoneg = tp->link_config.autoneg;
1233
1234 if (autoneg == AUTONEG_ENABLE &&
Matt Carlson95e28692008-05-25 23:44:14 -07001235 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1236 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Matt Carlsonf51f3562008-05-25 23:45:08 -07001237 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
Matt Carlson95e28692008-05-25 23:44:14 -07001238 else
Matt Carlsonf51f3562008-05-25 23:45:08 -07001239 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1240 } else
1241 flowctrl = tp->link_config.flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001242
Matt Carlsonf51f3562008-05-25 23:45:08 -07001243 tp->link_config.active_flowctrl = flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001244
Matt Carlsonf51f3562008-05-25 23:45:08 -07001245 if (flowctrl & TG3_FLOW_CTRL_RX)
Matt Carlson95e28692008-05-25 23:44:14 -07001246 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1247 else
1248 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1249
Matt Carlsonf51f3562008-05-25 23:45:08 -07001250 if (old_rx_mode != tp->rx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001251 tw32_f(MAC_RX_MODE, tp->rx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001252
Matt Carlsonf51f3562008-05-25 23:45:08 -07001253 if (flowctrl & TG3_FLOW_CTRL_TX)
Matt Carlson95e28692008-05-25 23:44:14 -07001254 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1255 else
1256 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1257
Matt Carlsonf51f3562008-05-25 23:45:08 -07001258 if (old_tx_mode != tp->tx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001259 tw32_f(MAC_TX_MODE, tp->tx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001260}
1261
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001262static void tg3_adjust_link(struct net_device *dev)
1263{
1264 u8 oldflowctrl, linkmesg = 0;
1265 u32 mac_mode, lcl_adv, rmt_adv;
1266 struct tg3 *tp = netdev_priv(dev);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001267 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001268
1269 spin_lock(&tp->lock);
1270
1271 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1272 MAC_MODE_HALF_DUPLEX);
1273
1274 oldflowctrl = tp->link_config.active_flowctrl;
1275
1276 if (phydev->link) {
1277 lcl_adv = 0;
1278 rmt_adv = 0;
1279
1280 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1281 mac_mode |= MAC_MODE_PORT_MODE_MII;
1282 else
1283 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1284
1285 if (phydev->duplex == DUPLEX_HALF)
1286 mac_mode |= MAC_MODE_HALF_DUPLEX;
1287 else {
1288 lcl_adv = tg3_advert_flowctrl_1000T(
1289 tp->link_config.flowctrl);
1290
1291 if (phydev->pause)
1292 rmt_adv = LPA_PAUSE_CAP;
1293 if (phydev->asym_pause)
1294 rmt_adv |= LPA_PAUSE_ASYM;
1295 }
1296
1297 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1298 } else
1299 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1300
1301 if (mac_mode != tp->mac_mode) {
1302 tp->mac_mode = mac_mode;
1303 tw32_f(MAC_MODE, tp->mac_mode);
1304 udelay(40);
1305 }
1306
1307 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1308 tw32(MAC_TX_LENGTHS,
1309 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1310 (6 << TX_LENGTHS_IPG_SHIFT) |
1311 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1312 else
1313 tw32(MAC_TX_LENGTHS,
1314 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1315 (6 << TX_LENGTHS_IPG_SHIFT) |
1316 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1317
1318 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1319 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1320 phydev->speed != tp->link_config.active_speed ||
1321 phydev->duplex != tp->link_config.active_duplex ||
1322 oldflowctrl != tp->link_config.active_flowctrl)
1323 linkmesg = 1;
1324
1325 tp->link_config.active_speed = phydev->speed;
1326 tp->link_config.active_duplex = phydev->duplex;
1327
1328 spin_unlock(&tp->lock);
1329
1330 if (linkmesg)
1331 tg3_link_report(tp);
1332}
1333
1334static int tg3_phy_init(struct tg3 *tp)
1335{
1336 struct phy_device *phydev;
1337
1338 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1339 return 0;
1340
1341 /* Bring the PHY back to a known state. */
1342 tg3_bmcr_reset(tp);
1343
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001344 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001345
1346 /* Attach the MAC to the PHY. */
Matt Carlsona9daf362008-05-25 23:49:44 -07001347 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1348 phydev->dev_flags, phydev->interface);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001349 if (IS_ERR(phydev)) {
1350 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1351 return PTR_ERR(phydev);
1352 }
1353
1354 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1355
1356 /* Mask with MAC supported features. */
1357 phydev->supported &= (PHY_GBIT_FEATURES |
1358 SUPPORTED_Pause |
1359 SUPPORTED_Asym_Pause);
1360
1361 phydev->advertising = phydev->supported;
1362
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001363 return 0;
1364}
1365
1366static void tg3_phy_start(struct tg3 *tp)
1367{
1368 struct phy_device *phydev;
1369
1370 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1371 return;
1372
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001373 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001374
1375 if (tp->link_config.phy_is_low_power) {
1376 tp->link_config.phy_is_low_power = 0;
1377 phydev->speed = tp->link_config.orig_speed;
1378 phydev->duplex = tp->link_config.orig_duplex;
1379 phydev->autoneg = tp->link_config.orig_autoneg;
1380 phydev->advertising = tp->link_config.orig_advertising;
1381 }
1382
1383 phy_start(phydev);
1384
1385 phy_start_aneg(phydev);
1386}
1387
1388static void tg3_phy_stop(struct tg3 *tp)
1389{
1390 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1391 return;
1392
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001393 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001394}
1395
1396static void tg3_phy_fini(struct tg3 *tp)
1397{
1398 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001399 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001400 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1401 }
1402}
1403
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001404static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1405{
1406 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1407 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1408}
1409
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001410static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1411{
1412 u32 phy;
1413
1414 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1415 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1416 return;
1417
1418 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1419 u32 ephy;
1420
1421 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1422 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1423 ephy | MII_TG3_EPHY_SHADOW_EN);
1424 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1425 if (enable)
1426 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1427 else
1428 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1429 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1430 }
1431 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1432 }
1433 } else {
1434 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1435 MII_TG3_AUXCTL_SHDWSEL_MISC;
1436 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1437 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1438 if (enable)
1439 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1440 else
1441 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1442 phy |= MII_TG3_AUXCTL_MISC_WREN;
1443 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1444 }
1445 }
1446}
1447
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448static void tg3_phy_set_wirespeed(struct tg3 *tp)
1449{
1450 u32 val;
1451
1452 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1453 return;
1454
1455 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1456 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1457 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1458 (val | (1 << 15) | (1 << 4)));
1459}
1460
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001461static void tg3_phy_apply_otp(struct tg3 *tp)
1462{
1463 u32 otp, phy;
1464
1465 if (!tp->phy_otp)
1466 return;
1467
1468 otp = tp->phy_otp;
1469
1470 /* Enable SM_DSP clock and tx 6dB coding. */
1471 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1472 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1473 MII_TG3_AUXCTL_ACTL_TX_6DB;
1474 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1475
1476 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1477 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1478 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1479
1480 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1481 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1482 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1483
1484 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1485 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1486 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1487
1488 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1489 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1490
1491 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1492 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1493
1494 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1495 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1496 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1497
1498 /* Turn off SM_DSP clock. */
1499 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1500 MII_TG3_AUXCTL_ACTL_TX_6DB;
1501 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1502}
1503
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504static int tg3_wait_macro_done(struct tg3 *tp)
1505{
1506 int limit = 100;
1507
1508 while (limit--) {
1509 u32 tmp32;
1510
1511 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1512 if ((tmp32 & 0x1000) == 0)
1513 break;
1514 }
1515 }
1516 if (limit <= 0)
1517 return -EBUSY;
1518
1519 return 0;
1520}
1521
1522static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1523{
1524 static const u32 test_pat[4][6] = {
1525 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1526 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1527 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1528 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1529 };
1530 int chan;
1531
1532 for (chan = 0; chan < 4; chan++) {
1533 int i;
1534
1535 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1536 (chan * 0x2000) | 0x0200);
1537 tg3_writephy(tp, 0x16, 0x0002);
1538
1539 for (i = 0; i < 6; i++)
1540 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1541 test_pat[chan][i]);
1542
1543 tg3_writephy(tp, 0x16, 0x0202);
1544 if (tg3_wait_macro_done(tp)) {
1545 *resetp = 1;
1546 return -EBUSY;
1547 }
1548
1549 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1550 (chan * 0x2000) | 0x0200);
1551 tg3_writephy(tp, 0x16, 0x0082);
1552 if (tg3_wait_macro_done(tp)) {
1553 *resetp = 1;
1554 return -EBUSY;
1555 }
1556
1557 tg3_writephy(tp, 0x16, 0x0802);
1558 if (tg3_wait_macro_done(tp)) {
1559 *resetp = 1;
1560 return -EBUSY;
1561 }
1562
1563 for (i = 0; i < 6; i += 2) {
1564 u32 low, high;
1565
1566 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1567 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1568 tg3_wait_macro_done(tp)) {
1569 *resetp = 1;
1570 return -EBUSY;
1571 }
1572 low &= 0x7fff;
1573 high &= 0x000f;
1574 if (low != test_pat[chan][i] ||
1575 high != test_pat[chan][i+1]) {
1576 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1577 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1578 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1579
1580 return -EBUSY;
1581 }
1582 }
1583 }
1584
1585 return 0;
1586}
1587
1588static int tg3_phy_reset_chanpat(struct tg3 *tp)
1589{
1590 int chan;
1591
1592 for (chan = 0; chan < 4; chan++) {
1593 int i;
1594
1595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1596 (chan * 0x2000) | 0x0200);
1597 tg3_writephy(tp, 0x16, 0x0002);
1598 for (i = 0; i < 6; i++)
1599 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1600 tg3_writephy(tp, 0x16, 0x0202);
1601 if (tg3_wait_macro_done(tp))
1602 return -EBUSY;
1603 }
1604
1605 return 0;
1606}
1607
1608static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1609{
1610 u32 reg32, phy9_orig;
1611 int retries, do_phy_reset, err;
1612
1613 retries = 10;
1614 do_phy_reset = 1;
1615 do {
1616 if (do_phy_reset) {
1617 err = tg3_bmcr_reset(tp);
1618 if (err)
1619 return err;
1620 do_phy_reset = 0;
1621 }
1622
1623 /* Disable transmitter and interrupt. */
1624 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1625 continue;
1626
1627 reg32 |= 0x3000;
1628 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1629
1630 /* Set full-duplex, 1000 mbps. */
1631 tg3_writephy(tp, MII_BMCR,
1632 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1633
1634 /* Set to master mode. */
1635 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1636 continue;
1637
1638 tg3_writephy(tp, MII_TG3_CTRL,
1639 (MII_TG3_CTRL_AS_MASTER |
1640 MII_TG3_CTRL_ENABLE_AS_MASTER));
1641
1642 /* Enable SM_DSP_CLOCK and 6dB. */
1643 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1644
1645 /* Block the PHY control access. */
1646 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1647 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1648
1649 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1650 if (!err)
1651 break;
1652 } while (--retries);
1653
1654 err = tg3_phy_reset_chanpat(tp);
1655 if (err)
1656 return err;
1657
1658 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1659 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1660
1661 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1662 tg3_writephy(tp, 0x16, 0x0000);
1663
1664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1666 /* Set Extended packet length bit for jumbo frames */
1667 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1668 }
1669 else {
1670 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1671 }
1672
1673 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1674
1675 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1676 reg32 &= ~0x3000;
1677 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1678 } else if (!err)
1679 err = -EBUSY;
1680
1681 return err;
1682}
1683
1684/* This will reset the tigon3 PHY if there is no valid
1685 * link unless the FORCE argument is non-zero.
1686 */
1687static int tg3_phy_reset(struct tg3 *tp)
1688{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001689 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 u32 phy_status;
1691 int err;
1692
Michael Chan60189dd2006-12-17 17:08:07 -08001693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1694 u32 val;
1695
1696 val = tr32(GRC_MISC_CFG);
1697 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1698 udelay(40);
1699 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1701 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1702 if (err != 0)
1703 return -EBUSY;
1704
Michael Chanc8e1e822006-04-29 18:55:17 -07001705 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1706 netif_carrier_off(tp->dev);
1707 tg3_link_report(tp);
1708 }
1709
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1711 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1713 err = tg3_phy_reset_5703_4_5(tp);
1714 if (err)
1715 return err;
1716 goto out;
1717 }
1718
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001719 cpmuctrl = 0;
1720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1721 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1722 cpmuctrl = tr32(TG3_CPMU_CTRL);
1723 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1724 tw32(TG3_CPMU_CTRL,
1725 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1726 }
1727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 err = tg3_bmcr_reset(tp);
1729 if (err)
1730 return err;
1731
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001732 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1733 u32 phy;
1734
1735 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1736 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1737
1738 tw32(TG3_CPMU_CTRL, cpmuctrl);
1739 }
1740
Matt Carlsonbcb37f62008-11-03 16:52:09 -08001741 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1742 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001743 u32 val;
1744
1745 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1746 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1747 CPMU_LSPD_1000MB_MACCLK_12_5) {
1748 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1749 udelay(40);
1750 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1751 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001752
1753 /* Disable GPHY autopowerdown. */
1754 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1755 MII_TG3_MISC_SHDW_WREN |
1756 MII_TG3_MISC_SHDW_APD_SEL |
1757 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001758 }
1759
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001760 tg3_phy_apply_otp(tp);
1761
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762out:
1763 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1764 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1765 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1766 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1767 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1768 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1769 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1770 }
1771 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1772 tg3_writephy(tp, 0x1c, 0x8d68);
1773 tg3_writephy(tp, 0x1c, 0x8d68);
1774 }
1775 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1776 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1777 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1778 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1779 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1780 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1781 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1782 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1783 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1784 }
Michael Chanc424cb22006-04-29 18:56:34 -07001785 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1786 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1787 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001788 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1789 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1790 tg3_writephy(tp, MII_TG3_TEST1,
1791 MII_TG3_TEST1_TRIM_EN | 0x4);
1792 } else
1793 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001794 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1795 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 /* Set Extended packet length bit (bit 14) on all chips that */
1797 /* support jumbo frames */
1798 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1799 /* Cannot do read-modify-write on 5401 */
1800 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001801 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 u32 phy_reg;
1803
1804 /* Set bit 14 with read-modify-write to preserve other bits */
1805 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1806 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1807 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1808 }
1809
1810 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1811 * jumbo frames transmission.
1812 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001813 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 u32 phy_reg;
1815
1816 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1819 }
1820
Michael Chan715116a2006-09-27 16:09:25 -07001821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001822 /* adjust output voltage */
1823 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001824 }
1825
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001826 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 tg3_phy_set_wirespeed(tp);
1828 return 0;
1829}
1830
1831static void tg3_frob_aux_power(struct tg3 *tp)
1832{
1833 struct tg3 *tp_peer = tp;
1834
Michael Chan9d26e212006-12-07 00:21:14 -08001835 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 return;
1837
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001838 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1839 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1840 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001842 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001843 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001844 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001845 tp_peer = tp;
1846 else
1847 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001848 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
1850 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001851 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1852 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1853 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001856 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1857 (GRC_LCLCTRL_GPIO_OE0 |
1858 GRC_LCLCTRL_GPIO_OE1 |
1859 GRC_LCLCTRL_GPIO_OE2 |
1860 GRC_LCLCTRL_GPIO_OUTPUT0 |
1861 GRC_LCLCTRL_GPIO_OUTPUT1),
1862 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001863 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1864 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1865 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1866 GRC_LCLCTRL_GPIO_OE1 |
1867 GRC_LCLCTRL_GPIO_OE2 |
1868 GRC_LCLCTRL_GPIO_OUTPUT0 |
1869 GRC_LCLCTRL_GPIO_OUTPUT1 |
1870 tp->grc_local_ctrl;
1871 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1872
1873 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1874 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1875
1876 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1877 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 } else {
1879 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001880 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882 if (tp_peer != tp &&
1883 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1884 return;
1885
Michael Chandc56b7d2005-12-19 16:26:28 -08001886 /* Workaround to prevent overdrawing Amps. */
1887 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1888 ASIC_REV_5714) {
1889 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001890 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1891 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001892 }
1893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 /* On 5753 and variants, GPIO2 cannot be used. */
1895 no_gpio2 = tp->nic_sram_data_cfg &
1896 NIC_SRAM_DATA_CFG_NO_GPIO2;
1897
Michael Chandc56b7d2005-12-19 16:26:28 -08001898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 GRC_LCLCTRL_GPIO_OE1 |
1900 GRC_LCLCTRL_GPIO_OE2 |
1901 GRC_LCLCTRL_GPIO_OUTPUT1 |
1902 GRC_LCLCTRL_GPIO_OUTPUT2;
1903 if (no_gpio2) {
1904 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1905 GRC_LCLCTRL_GPIO_OUTPUT2);
1906 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001907 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1908 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
1910 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1911
Michael Chanb401e9e2005-12-19 16:27:04 -08001912 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1913 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915 if (!no_gpio2) {
1916 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001917 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1918 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 }
1920 }
1921 } else {
1922 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1923 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1924 if (tp_peer != tp &&
1925 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1926 return;
1927
Michael Chanb401e9e2005-12-19 16:27:04 -08001928 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1929 (GRC_LCLCTRL_GPIO_OE1 |
1930 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
Michael Chanb401e9e2005-12-19 16:27:04 -08001932 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1933 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
Michael Chanb401e9e2005-12-19 16:27:04 -08001935 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1936 (GRC_LCLCTRL_GPIO_OE1 |
1937 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 }
1939 }
1940}
1941
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07001942static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1943{
1944 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1945 return 1;
1946 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1947 if (speed != SPEED_10)
1948 return 1;
1949 } else if (speed == SPEED_10)
1950 return 1;
1951
1952 return 0;
1953}
1954
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955static int tg3_setup_phy(struct tg3 *, int);
1956
1957#define RESET_KIND_SHUTDOWN 0
1958#define RESET_KIND_INIT 1
1959#define RESET_KIND_SUSPEND 2
1960
1961static void tg3_write_sig_post_reset(struct tg3 *, int);
1962static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08001963static int tg3_nvram_lock(struct tg3 *);
1964static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
Michael Chan15c3b692006-03-22 01:06:52 -08001966static void tg3_power_down_phy(struct tg3 *tp)
1967{
Matt Carlsonce057f02007-11-12 21:08:03 -08001968 u32 val;
1969
Michael Chan51297242007-02-13 12:17:57 -08001970 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1972 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1973 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1974
1975 sg_dig_ctrl |=
1976 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1977 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1978 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1979 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001980 return;
Michael Chan51297242007-02-13 12:17:57 -08001981 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001982
Michael Chan60189dd2006-12-17 17:08:07 -08001983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08001984 tg3_bmcr_reset(tp);
1985 val = tr32(GRC_MISC_CFG);
1986 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1987 udelay(40);
1988 return;
Matt Carlsondd477002008-05-25 23:45:58 -07001989 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan715116a2006-09-27 16:09:25 -07001990 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1991 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1992 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1993 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001994
Michael Chan15c3b692006-03-22 01:06:52 -08001995 /* The PHY should not be powered down on some chips because
1996 * of bugs.
1997 */
1998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2000 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2001 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2002 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002003
Matt Carlsonbcb37f62008-11-03 16:52:09 -08002004 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2005 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002006 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2007 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2008 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2009 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2010 }
2011
Michael Chan15c3b692006-03-22 01:06:52 -08002012 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2013}
2014
Matt Carlson3f007892008-11-03 16:51:36 -08002015/* tp->lock is held. */
2016static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2017{
2018 u32 addr_high, addr_low;
2019 int i;
2020
2021 addr_high = ((tp->dev->dev_addr[0] << 8) |
2022 tp->dev->dev_addr[1]);
2023 addr_low = ((tp->dev->dev_addr[2] << 24) |
2024 (tp->dev->dev_addr[3] << 16) |
2025 (tp->dev->dev_addr[4] << 8) |
2026 (tp->dev->dev_addr[5] << 0));
2027 for (i = 0; i < 4; i++) {
2028 if (i == 1 && skip_mac_1)
2029 continue;
2030 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2031 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2032 }
2033
2034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2036 for (i = 0; i < 12; i++) {
2037 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2038 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2039 }
2040 }
2041
2042 addr_high = (tp->dev->dev_addr[0] +
2043 tp->dev->dev_addr[1] +
2044 tp->dev->dev_addr[2] +
2045 tp->dev->dev_addr[3] +
2046 tp->dev->dev_addr[4] +
2047 tp->dev->dev_addr[5]) &
2048 TX_BACKOFF_SEED_MASK;
2049 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2050}
2051
Michael Chanbc1c7562006-03-20 17:48:03 -08002052static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053{
2054 u32 misc_host_ctrl;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002055 bool device_should_wake;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056
2057 /* Make sure register accesses (indirect or otherwise)
2058 * will function correctly.
2059 */
2060 pci_write_config_dword(tp->pdev,
2061 TG3PCI_MISC_HOST_CTRL,
2062 tp->misc_host_ctrl);
2063
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002065 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002066 pci_enable_wake(tp->pdev, state, false);
2067 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002068
Michael Chan9d26e212006-12-07 00:21:14 -08002069 /* Switch out of Vaux if it is a NIC */
2070 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002071 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
2073 return 0;
2074
Michael Chanbc1c7562006-03-20 17:48:03 -08002075 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002076 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002077 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 break;
2079
2080 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002081 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2082 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002084 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2086 tw32(TG3PCI_MISC_HOST_CTRL,
2087 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2088
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002089 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2090 device_may_wakeup(&tp->pdev->dev) &&
2091 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2092
Matt Carlsondd477002008-05-25 23:45:58 -07002093 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002094 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2095 !tp->link_config.phy_is_low_power) {
2096 struct phy_device *phydev;
2097 u32 advertising;
2098
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002099 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002100
2101 tp->link_config.phy_is_low_power = 1;
2102
2103 tp->link_config.orig_speed = phydev->speed;
2104 tp->link_config.orig_duplex = phydev->duplex;
2105 tp->link_config.orig_autoneg = phydev->autoneg;
2106 tp->link_config.orig_advertising = phydev->advertising;
2107
2108 advertising = ADVERTISED_TP |
2109 ADVERTISED_Pause |
2110 ADVERTISED_Autoneg |
2111 ADVERTISED_10baseT_Half;
2112
2113 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002114 device_should_wake) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002115 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2116 advertising |=
2117 ADVERTISED_100baseT_Half |
2118 ADVERTISED_100baseT_Full |
2119 ADVERTISED_10baseT_Full;
2120 else
2121 advertising |= ADVERTISED_10baseT_Full;
2122 }
2123
2124 phydev->advertising = advertising;
2125
2126 phy_start_aneg(phydev);
2127 }
Matt Carlsondd477002008-05-25 23:45:58 -07002128 } else {
2129 if (tp->link_config.phy_is_low_power == 0) {
2130 tp->link_config.phy_is_low_power = 1;
2131 tp->link_config.orig_speed = tp->link_config.speed;
2132 tp->link_config.orig_duplex = tp->link_config.duplex;
2133 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2134 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
Matt Carlsondd477002008-05-25 23:45:58 -07002136 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2137 tp->link_config.speed = SPEED_10;
2138 tp->link_config.duplex = DUPLEX_HALF;
2139 tp->link_config.autoneg = AUTONEG_ENABLE;
2140 tg3_setup_phy(tp, 0);
2141 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 }
2143
Matt Carlson3f007892008-11-03 16:51:36 -08002144 __tg3_set_mac_addr(tp, 0);
2145
Michael Chanb5d37722006-09-27 16:06:21 -07002146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2147 u32 val;
2148
2149 val = tr32(GRC_VCPU_EXT_CTRL);
2150 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2151 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002152 int i;
2153 u32 val;
2154
2155 for (i = 0; i < 200; i++) {
2156 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2157 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2158 break;
2159 msleep(1);
2160 }
2161 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002162 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2163 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2164 WOL_DRV_STATE_SHUTDOWN |
2165 WOL_DRV_WOL |
2166 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002167
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002168 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 u32 mac_mode;
2170
2171 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlsondd477002008-05-25 23:45:58 -07002172 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2173 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2174 udelay(40);
2175 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
Michael Chan3f7045c2006-09-27 16:02:29 -07002177 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2178 mac_mode = MAC_MODE_PORT_MODE_GMII;
2179 else
2180 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002182 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2183 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2184 ASIC_REV_5700) {
2185 u32 speed = (tp->tg3_flags &
2186 TG3_FLAG_WOL_SPEED_100MB) ?
2187 SPEED_100 : SPEED_10;
2188 if (tg3_5700_link_polarity(tp, speed))
2189 mac_mode |= MAC_MODE_LINK_POLARITY;
2190 else
2191 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2192 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 } else {
2194 mac_mode = MAC_MODE_PORT_MODE_TBI;
2195 }
2196
John W. Linvillecbf46852005-04-21 17:01:29 -07002197 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 tw32(MAC_LED_CTRL, tp->led_ctrl);
2199
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002200 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2201 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2202 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2203 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2204 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2205 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
Matt Carlson3bda1252008-08-15 14:08:22 -07002207 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2208 mac_mode |= tp->mac_mode &
2209 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2210 if (mac_mode & MAC_MODE_APE_TX_EN)
2211 mac_mode |= MAC_MODE_TDE_ENABLE;
2212 }
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 tw32_f(MAC_MODE, mac_mode);
2215 udelay(100);
2216
2217 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2218 udelay(10);
2219 }
2220
2221 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2222 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2224 u32 base_val;
2225
2226 base_val = tp->pci_clock_ctrl;
2227 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2228 CLOCK_CTRL_TXCLK_DISABLE);
2229
Michael Chanb401e9e2005-12-19 16:27:04 -08002230 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2231 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002232 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002233 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002234 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002235 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002236 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2238 u32 newbits1, newbits2;
2239
2240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2242 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2243 CLOCK_CTRL_TXCLK_DISABLE |
2244 CLOCK_CTRL_ALTCLK);
2245 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2246 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2247 newbits1 = CLOCK_CTRL_625_CORE;
2248 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2249 } else {
2250 newbits1 = CLOCK_CTRL_ALTCLK;
2251 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2252 }
2253
Michael Chanb401e9e2005-12-19 16:27:04 -08002254 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2255 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256
Michael Chanb401e9e2005-12-19 16:27:04 -08002257 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2258 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259
2260 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2261 u32 newbits3;
2262
2263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2265 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2266 CLOCK_CTRL_TXCLK_DISABLE |
2267 CLOCK_CTRL_44MHZ_CORE);
2268 } else {
2269 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2270 }
2271
Michael Chanb401e9e2005-12-19 16:27:04 -08002272 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2273 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 }
2275 }
2276
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002277 if (!(device_should_wake) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002278 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2279 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Michael Chan3f7045c2006-09-27 16:02:29 -07002280 tg3_power_down_phy(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 tg3_frob_aux_power(tp);
2283
2284 /* Workaround for unstable PLL clock */
2285 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2286 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2287 u32 val = tr32(0x7d00);
2288
2289 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2290 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002291 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002292 int err;
2293
2294 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002296 if (!err)
2297 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 }
2300
Michael Chanbbadf502006-04-06 21:46:34 -07002301 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2302
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002303 if (device_should_wake)
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002304 pci_enable_wake(tp->pdev, state, true);
2305
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002307 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 return 0;
2310}
2311
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2313{
2314 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2315 case MII_TG3_AUX_STAT_10HALF:
2316 *speed = SPEED_10;
2317 *duplex = DUPLEX_HALF;
2318 break;
2319
2320 case MII_TG3_AUX_STAT_10FULL:
2321 *speed = SPEED_10;
2322 *duplex = DUPLEX_FULL;
2323 break;
2324
2325 case MII_TG3_AUX_STAT_100HALF:
2326 *speed = SPEED_100;
2327 *duplex = DUPLEX_HALF;
2328 break;
2329
2330 case MII_TG3_AUX_STAT_100FULL:
2331 *speed = SPEED_100;
2332 *duplex = DUPLEX_FULL;
2333 break;
2334
2335 case MII_TG3_AUX_STAT_1000HALF:
2336 *speed = SPEED_1000;
2337 *duplex = DUPLEX_HALF;
2338 break;
2339
2340 case MII_TG3_AUX_STAT_1000FULL:
2341 *speed = SPEED_1000;
2342 *duplex = DUPLEX_FULL;
2343 break;
2344
2345 default:
Michael Chan715116a2006-09-27 16:09:25 -07002346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2347 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2348 SPEED_10;
2349 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2350 DUPLEX_HALF;
2351 break;
2352 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 *speed = SPEED_INVALID;
2354 *duplex = DUPLEX_INVALID;
2355 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357}
2358
2359static void tg3_phy_copper_begin(struct tg3 *tp)
2360{
2361 u32 new_adv;
2362 int i;
2363
2364 if (tp->link_config.phy_is_low_power) {
2365 /* Entering low power mode. Disable gigabit and
2366 * 100baseT advertisements.
2367 */
2368 tg3_writephy(tp, MII_TG3_CTRL, 0);
2369
2370 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2371 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2372 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2373 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2374
2375 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2376 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2378 tp->link_config.advertising &=
2379 ~(ADVERTISED_1000baseT_Half |
2380 ADVERTISED_1000baseT_Full);
2381
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002382 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2384 new_adv |= ADVERTISE_10HALF;
2385 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2386 new_adv |= ADVERTISE_10FULL;
2387 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2388 new_adv |= ADVERTISE_100HALF;
2389 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2390 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002391
2392 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2393
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2395
2396 if (tp->link_config.advertising &
2397 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2398 new_adv = 0;
2399 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2400 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2401 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2402 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2403 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2404 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2405 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2406 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2407 MII_TG3_CTRL_ENABLE_AS_MASTER);
2408 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2409 } else {
2410 tg3_writephy(tp, MII_TG3_CTRL, 0);
2411 }
2412 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002413 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2414 new_adv |= ADVERTISE_CSMA;
2415
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 /* Asking for a specific link mode. */
2417 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2419
2420 if (tp->link_config.duplex == DUPLEX_FULL)
2421 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2422 else
2423 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2424 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2425 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2426 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2427 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 if (tp->link_config.speed == SPEED_100) {
2430 if (tp->link_config.duplex == DUPLEX_FULL)
2431 new_adv |= ADVERTISE_100FULL;
2432 else
2433 new_adv |= ADVERTISE_100HALF;
2434 } else {
2435 if (tp->link_config.duplex == DUPLEX_FULL)
2436 new_adv |= ADVERTISE_10FULL;
2437 else
2438 new_adv |= ADVERTISE_10HALF;
2439 }
2440 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002441
2442 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002444
2445 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 }
2447
2448 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2449 tp->link_config.speed != SPEED_INVALID) {
2450 u32 bmcr, orig_bmcr;
2451
2452 tp->link_config.active_speed = tp->link_config.speed;
2453 tp->link_config.active_duplex = tp->link_config.duplex;
2454
2455 bmcr = 0;
2456 switch (tp->link_config.speed) {
2457 default:
2458 case SPEED_10:
2459 break;
2460
2461 case SPEED_100:
2462 bmcr |= BMCR_SPEED100;
2463 break;
2464
2465 case SPEED_1000:
2466 bmcr |= TG3_BMCR_SPEED1000;
2467 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002468 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
2470 if (tp->link_config.duplex == DUPLEX_FULL)
2471 bmcr |= BMCR_FULLDPLX;
2472
2473 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2474 (bmcr != orig_bmcr)) {
2475 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2476 for (i = 0; i < 1500; i++) {
2477 u32 tmp;
2478
2479 udelay(10);
2480 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2481 tg3_readphy(tp, MII_BMSR, &tmp))
2482 continue;
2483 if (!(tmp & BMSR_LSTATUS)) {
2484 udelay(40);
2485 break;
2486 }
2487 }
2488 tg3_writephy(tp, MII_BMCR, bmcr);
2489 udelay(40);
2490 }
2491 } else {
2492 tg3_writephy(tp, MII_BMCR,
2493 BMCR_ANENABLE | BMCR_ANRESTART);
2494 }
2495}
2496
2497static int tg3_init_5401phy_dsp(struct tg3 *tp)
2498{
2499 int err;
2500
2501 /* Turn off tap power management. */
2502 /* Set Extended packet length bit */
2503 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2504
2505 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2506 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2507
2508 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2509 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2510
2511 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2512 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2513
2514 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2515 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2516
2517 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2518 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2519
2520 udelay(40);
2521
2522 return err;
2523}
2524
Michael Chan3600d912006-12-07 00:21:48 -08002525static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526{
Michael Chan3600d912006-12-07 00:21:48 -08002527 u32 adv_reg, all_mask = 0;
2528
2529 if (mask & ADVERTISED_10baseT_Half)
2530 all_mask |= ADVERTISE_10HALF;
2531 if (mask & ADVERTISED_10baseT_Full)
2532 all_mask |= ADVERTISE_10FULL;
2533 if (mask & ADVERTISED_100baseT_Half)
2534 all_mask |= ADVERTISE_100HALF;
2535 if (mask & ADVERTISED_100baseT_Full)
2536 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
2538 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2539 return 0;
2540
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 if ((adv_reg & all_mask) != all_mask)
2542 return 0;
2543 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2544 u32 tg3_ctrl;
2545
Michael Chan3600d912006-12-07 00:21:48 -08002546 all_mask = 0;
2547 if (mask & ADVERTISED_1000baseT_Half)
2548 all_mask |= ADVERTISE_1000HALF;
2549 if (mask & ADVERTISED_1000baseT_Full)
2550 all_mask |= ADVERTISE_1000FULL;
2551
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2553 return 0;
2554
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 if ((tg3_ctrl & all_mask) != all_mask)
2556 return 0;
2557 }
2558 return 1;
2559}
2560
Matt Carlsonef167e22007-12-20 20:10:01 -08002561static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2562{
2563 u32 curadv, reqadv;
2564
2565 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2566 return 1;
2567
2568 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2569 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2570
2571 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2572 if (curadv != reqadv)
2573 return 0;
2574
2575 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2576 tg3_readphy(tp, MII_LPA, rmtadv);
2577 } else {
2578 /* Reprogram the advertisement register, even if it
2579 * does not affect the current link. If the link
2580 * gets renegotiated in the future, we can save an
2581 * additional renegotiation cycle by advertising
2582 * it correctly in the first place.
2583 */
2584 if (curadv != reqadv) {
2585 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2586 ADVERTISE_PAUSE_ASYM);
2587 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2588 }
2589 }
2590
2591 return 1;
2592}
2593
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2595{
2596 int current_link_up;
2597 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002598 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 u16 current_speed;
2600 u8 current_duplex;
2601 int i, err;
2602
2603 tw32(MAC_EVENT, 0);
2604
2605 tw32_f(MAC_STATUS,
2606 (MAC_STATUS_SYNC_CHANGED |
2607 MAC_STATUS_CFG_CHANGED |
2608 MAC_STATUS_MI_COMPLETION |
2609 MAC_STATUS_LNKSTATE_CHANGED));
2610 udelay(40);
2611
Matt Carlson8ef21422008-05-02 16:47:53 -07002612 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2613 tw32_f(MAC_MI_MODE,
2614 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2615 udelay(80);
2616 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
2618 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2619
2620 /* Some third-party PHYs need to be reset on link going
2621 * down.
2622 */
2623 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2626 netif_carrier_ok(tp->dev)) {
2627 tg3_readphy(tp, MII_BMSR, &bmsr);
2628 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2629 !(bmsr & BMSR_LSTATUS))
2630 force_reset = 1;
2631 }
2632 if (force_reset)
2633 tg3_phy_reset(tp);
2634
2635 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2636 tg3_readphy(tp, MII_BMSR, &bmsr);
2637 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2638 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2639 bmsr = 0;
2640
2641 if (!(bmsr & BMSR_LSTATUS)) {
2642 err = tg3_init_5401phy_dsp(tp);
2643 if (err)
2644 return err;
2645
2646 tg3_readphy(tp, MII_BMSR, &bmsr);
2647 for (i = 0; i < 1000; i++) {
2648 udelay(10);
2649 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2650 (bmsr & BMSR_LSTATUS)) {
2651 udelay(40);
2652 break;
2653 }
2654 }
2655
2656 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2657 !(bmsr & BMSR_LSTATUS) &&
2658 tp->link_config.active_speed == SPEED_1000) {
2659 err = tg3_phy_reset(tp);
2660 if (!err)
2661 err = tg3_init_5401phy_dsp(tp);
2662 if (err)
2663 return err;
2664 }
2665 }
2666 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2667 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2668 /* 5701 {A0,B0} CRC bug workaround */
2669 tg3_writephy(tp, 0x15, 0x0a75);
2670 tg3_writephy(tp, 0x1c, 0x8c68);
2671 tg3_writephy(tp, 0x1c, 0x8d68);
2672 tg3_writephy(tp, 0x1c, 0x8c68);
2673 }
2674
2675 /* Clear pending interrupts... */
2676 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2677 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2678
2679 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2680 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002681 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2683
2684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2686 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2687 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2688 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2689 else
2690 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2691 }
2692
2693 current_link_up = 0;
2694 current_speed = SPEED_INVALID;
2695 current_duplex = DUPLEX_INVALID;
2696
2697 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2698 u32 val;
2699
2700 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2701 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2702 if (!(val & (1 << 10))) {
2703 val |= (1 << 10);
2704 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2705 goto relink;
2706 }
2707 }
2708
2709 bmsr = 0;
2710 for (i = 0; i < 100; i++) {
2711 tg3_readphy(tp, MII_BMSR, &bmsr);
2712 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2713 (bmsr & BMSR_LSTATUS))
2714 break;
2715 udelay(40);
2716 }
2717
2718 if (bmsr & BMSR_LSTATUS) {
2719 u32 aux_stat, bmcr;
2720
2721 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2722 for (i = 0; i < 2000; i++) {
2723 udelay(10);
2724 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2725 aux_stat)
2726 break;
2727 }
2728
2729 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2730 &current_speed,
2731 &current_duplex);
2732
2733 bmcr = 0;
2734 for (i = 0; i < 200; i++) {
2735 tg3_readphy(tp, MII_BMCR, &bmcr);
2736 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2737 continue;
2738 if (bmcr && bmcr != 0x7fff)
2739 break;
2740 udelay(10);
2741 }
2742
Matt Carlsonef167e22007-12-20 20:10:01 -08002743 lcl_adv = 0;
2744 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745
Matt Carlsonef167e22007-12-20 20:10:01 -08002746 tp->link_config.active_speed = current_speed;
2747 tp->link_config.active_duplex = current_duplex;
2748
2749 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2750 if ((bmcr & BMCR_ANENABLE) &&
2751 tg3_copper_is_advertising_all(tp,
2752 tp->link_config.advertising)) {
2753 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2754 &rmt_adv))
2755 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 }
2757 } else {
2758 if (!(bmcr & BMCR_ANENABLE) &&
2759 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002760 tp->link_config.duplex == current_duplex &&
2761 tp->link_config.flowctrl ==
2762 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 }
2765 }
2766
Matt Carlsonef167e22007-12-20 20:10:01 -08002767 if (current_link_up == 1 &&
2768 tp->link_config.active_duplex == DUPLEX_FULL)
2769 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 }
2771
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772relink:
Michael Chan6921d202005-12-13 21:15:53 -08002773 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 u32 tmp;
2775
2776 tg3_phy_copper_begin(tp);
2777
2778 tg3_readphy(tp, MII_BMSR, &tmp);
2779 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2780 (tmp & BMSR_LSTATUS))
2781 current_link_up = 1;
2782 }
2783
2784 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2785 if (current_link_up == 1) {
2786 if (tp->link_config.active_speed == SPEED_100 ||
2787 tp->link_config.active_speed == SPEED_10)
2788 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2789 else
2790 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2791 } else
2792 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2793
2794 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2795 if (tp->link_config.active_duplex == DUPLEX_HALF)
2796 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2797
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002799 if (current_link_up == 1 &&
2800 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002802 else
2803 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 }
2805
2806 /* ??? Without this setting Netgear GA302T PHY does not
2807 * ??? send/receive packets...
2808 */
2809 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2810 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2811 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2812 tw32_f(MAC_MI_MODE, tp->mi_mode);
2813 udelay(80);
2814 }
2815
2816 tw32_f(MAC_MODE, tp->mac_mode);
2817 udelay(40);
2818
2819 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2820 /* Polled via timer. */
2821 tw32_f(MAC_EVENT, 0);
2822 } else {
2823 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2824 }
2825 udelay(40);
2826
2827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2828 current_link_up == 1 &&
2829 tp->link_config.active_speed == SPEED_1000 &&
2830 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2831 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2832 udelay(120);
2833 tw32_f(MAC_STATUS,
2834 (MAC_STATUS_SYNC_CHANGED |
2835 MAC_STATUS_CFG_CHANGED));
2836 udelay(40);
2837 tg3_write_mem(tp,
2838 NIC_SRAM_FIRMWARE_MBOX,
2839 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2840 }
2841
2842 if (current_link_up != netif_carrier_ok(tp->dev)) {
2843 if (current_link_up)
2844 netif_carrier_on(tp->dev);
2845 else
2846 netif_carrier_off(tp->dev);
2847 tg3_link_report(tp);
2848 }
2849
2850 return 0;
2851}
2852
2853struct tg3_fiber_aneginfo {
2854 int state;
2855#define ANEG_STATE_UNKNOWN 0
2856#define ANEG_STATE_AN_ENABLE 1
2857#define ANEG_STATE_RESTART_INIT 2
2858#define ANEG_STATE_RESTART 3
2859#define ANEG_STATE_DISABLE_LINK_OK 4
2860#define ANEG_STATE_ABILITY_DETECT_INIT 5
2861#define ANEG_STATE_ABILITY_DETECT 6
2862#define ANEG_STATE_ACK_DETECT_INIT 7
2863#define ANEG_STATE_ACK_DETECT 8
2864#define ANEG_STATE_COMPLETE_ACK_INIT 9
2865#define ANEG_STATE_COMPLETE_ACK 10
2866#define ANEG_STATE_IDLE_DETECT_INIT 11
2867#define ANEG_STATE_IDLE_DETECT 12
2868#define ANEG_STATE_LINK_OK 13
2869#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2870#define ANEG_STATE_NEXT_PAGE_WAIT 15
2871
2872 u32 flags;
2873#define MR_AN_ENABLE 0x00000001
2874#define MR_RESTART_AN 0x00000002
2875#define MR_AN_COMPLETE 0x00000004
2876#define MR_PAGE_RX 0x00000008
2877#define MR_NP_LOADED 0x00000010
2878#define MR_TOGGLE_TX 0x00000020
2879#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2880#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2881#define MR_LP_ADV_SYM_PAUSE 0x00000100
2882#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2883#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2884#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2885#define MR_LP_ADV_NEXT_PAGE 0x00001000
2886#define MR_TOGGLE_RX 0x00002000
2887#define MR_NP_RX 0x00004000
2888
2889#define MR_LINK_OK 0x80000000
2890
2891 unsigned long link_time, cur_time;
2892
2893 u32 ability_match_cfg;
2894 int ability_match_count;
2895
2896 char ability_match, idle_match, ack_match;
2897
2898 u32 txconfig, rxconfig;
2899#define ANEG_CFG_NP 0x00000080
2900#define ANEG_CFG_ACK 0x00000040
2901#define ANEG_CFG_RF2 0x00000020
2902#define ANEG_CFG_RF1 0x00000010
2903#define ANEG_CFG_PS2 0x00000001
2904#define ANEG_CFG_PS1 0x00008000
2905#define ANEG_CFG_HD 0x00004000
2906#define ANEG_CFG_FD 0x00002000
2907#define ANEG_CFG_INVAL 0x00001f06
2908
2909};
2910#define ANEG_OK 0
2911#define ANEG_DONE 1
2912#define ANEG_TIMER_ENAB 2
2913#define ANEG_FAILED -1
2914
2915#define ANEG_STATE_SETTLE_TIME 10000
2916
2917static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2918 struct tg3_fiber_aneginfo *ap)
2919{
Matt Carlson5be73b42007-12-20 20:09:29 -08002920 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 unsigned long delta;
2922 u32 rx_cfg_reg;
2923 int ret;
2924
2925 if (ap->state == ANEG_STATE_UNKNOWN) {
2926 ap->rxconfig = 0;
2927 ap->link_time = 0;
2928 ap->cur_time = 0;
2929 ap->ability_match_cfg = 0;
2930 ap->ability_match_count = 0;
2931 ap->ability_match = 0;
2932 ap->idle_match = 0;
2933 ap->ack_match = 0;
2934 }
2935 ap->cur_time++;
2936
2937 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2938 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2939
2940 if (rx_cfg_reg != ap->ability_match_cfg) {
2941 ap->ability_match_cfg = rx_cfg_reg;
2942 ap->ability_match = 0;
2943 ap->ability_match_count = 0;
2944 } else {
2945 if (++ap->ability_match_count > 1) {
2946 ap->ability_match = 1;
2947 ap->ability_match_cfg = rx_cfg_reg;
2948 }
2949 }
2950 if (rx_cfg_reg & ANEG_CFG_ACK)
2951 ap->ack_match = 1;
2952 else
2953 ap->ack_match = 0;
2954
2955 ap->idle_match = 0;
2956 } else {
2957 ap->idle_match = 1;
2958 ap->ability_match_cfg = 0;
2959 ap->ability_match_count = 0;
2960 ap->ability_match = 0;
2961 ap->ack_match = 0;
2962
2963 rx_cfg_reg = 0;
2964 }
2965
2966 ap->rxconfig = rx_cfg_reg;
2967 ret = ANEG_OK;
2968
2969 switch(ap->state) {
2970 case ANEG_STATE_UNKNOWN:
2971 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2972 ap->state = ANEG_STATE_AN_ENABLE;
2973
2974 /* fallthru */
2975 case ANEG_STATE_AN_ENABLE:
2976 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2977 if (ap->flags & MR_AN_ENABLE) {
2978 ap->link_time = 0;
2979 ap->cur_time = 0;
2980 ap->ability_match_cfg = 0;
2981 ap->ability_match_count = 0;
2982 ap->ability_match = 0;
2983 ap->idle_match = 0;
2984 ap->ack_match = 0;
2985
2986 ap->state = ANEG_STATE_RESTART_INIT;
2987 } else {
2988 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2989 }
2990 break;
2991
2992 case ANEG_STATE_RESTART_INIT:
2993 ap->link_time = ap->cur_time;
2994 ap->flags &= ~(MR_NP_LOADED);
2995 ap->txconfig = 0;
2996 tw32(MAC_TX_AUTO_NEG, 0);
2997 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2998 tw32_f(MAC_MODE, tp->mac_mode);
2999 udelay(40);
3000
3001 ret = ANEG_TIMER_ENAB;
3002 ap->state = ANEG_STATE_RESTART;
3003
3004 /* fallthru */
3005 case ANEG_STATE_RESTART:
3006 delta = ap->cur_time - ap->link_time;
3007 if (delta > ANEG_STATE_SETTLE_TIME) {
3008 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3009 } else {
3010 ret = ANEG_TIMER_ENAB;
3011 }
3012 break;
3013
3014 case ANEG_STATE_DISABLE_LINK_OK:
3015 ret = ANEG_DONE;
3016 break;
3017
3018 case ANEG_STATE_ABILITY_DETECT_INIT:
3019 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003020 ap->txconfig = ANEG_CFG_FD;
3021 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3022 if (flowctrl & ADVERTISE_1000XPAUSE)
3023 ap->txconfig |= ANEG_CFG_PS1;
3024 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3025 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3027 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3028 tw32_f(MAC_MODE, tp->mac_mode);
3029 udelay(40);
3030
3031 ap->state = ANEG_STATE_ABILITY_DETECT;
3032 break;
3033
3034 case ANEG_STATE_ABILITY_DETECT:
3035 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3036 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3037 }
3038 break;
3039
3040 case ANEG_STATE_ACK_DETECT_INIT:
3041 ap->txconfig |= ANEG_CFG_ACK;
3042 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3043 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3044 tw32_f(MAC_MODE, tp->mac_mode);
3045 udelay(40);
3046
3047 ap->state = ANEG_STATE_ACK_DETECT;
3048
3049 /* fallthru */
3050 case ANEG_STATE_ACK_DETECT:
3051 if (ap->ack_match != 0) {
3052 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3053 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3054 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3055 } else {
3056 ap->state = ANEG_STATE_AN_ENABLE;
3057 }
3058 } else if (ap->ability_match != 0 &&
3059 ap->rxconfig == 0) {
3060 ap->state = ANEG_STATE_AN_ENABLE;
3061 }
3062 break;
3063
3064 case ANEG_STATE_COMPLETE_ACK_INIT:
3065 if (ap->rxconfig & ANEG_CFG_INVAL) {
3066 ret = ANEG_FAILED;
3067 break;
3068 }
3069 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3070 MR_LP_ADV_HALF_DUPLEX |
3071 MR_LP_ADV_SYM_PAUSE |
3072 MR_LP_ADV_ASYM_PAUSE |
3073 MR_LP_ADV_REMOTE_FAULT1 |
3074 MR_LP_ADV_REMOTE_FAULT2 |
3075 MR_LP_ADV_NEXT_PAGE |
3076 MR_TOGGLE_RX |
3077 MR_NP_RX);
3078 if (ap->rxconfig & ANEG_CFG_FD)
3079 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3080 if (ap->rxconfig & ANEG_CFG_HD)
3081 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3082 if (ap->rxconfig & ANEG_CFG_PS1)
3083 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3084 if (ap->rxconfig & ANEG_CFG_PS2)
3085 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3086 if (ap->rxconfig & ANEG_CFG_RF1)
3087 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3088 if (ap->rxconfig & ANEG_CFG_RF2)
3089 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3090 if (ap->rxconfig & ANEG_CFG_NP)
3091 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3092
3093 ap->link_time = ap->cur_time;
3094
3095 ap->flags ^= (MR_TOGGLE_TX);
3096 if (ap->rxconfig & 0x0008)
3097 ap->flags |= MR_TOGGLE_RX;
3098 if (ap->rxconfig & ANEG_CFG_NP)
3099 ap->flags |= MR_NP_RX;
3100 ap->flags |= MR_PAGE_RX;
3101
3102 ap->state = ANEG_STATE_COMPLETE_ACK;
3103 ret = ANEG_TIMER_ENAB;
3104 break;
3105
3106 case ANEG_STATE_COMPLETE_ACK:
3107 if (ap->ability_match != 0 &&
3108 ap->rxconfig == 0) {
3109 ap->state = ANEG_STATE_AN_ENABLE;
3110 break;
3111 }
3112 delta = ap->cur_time - ap->link_time;
3113 if (delta > ANEG_STATE_SETTLE_TIME) {
3114 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3115 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3116 } else {
3117 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3118 !(ap->flags & MR_NP_RX)) {
3119 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3120 } else {
3121 ret = ANEG_FAILED;
3122 }
3123 }
3124 }
3125 break;
3126
3127 case ANEG_STATE_IDLE_DETECT_INIT:
3128 ap->link_time = ap->cur_time;
3129 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3130 tw32_f(MAC_MODE, tp->mac_mode);
3131 udelay(40);
3132
3133 ap->state = ANEG_STATE_IDLE_DETECT;
3134 ret = ANEG_TIMER_ENAB;
3135 break;
3136
3137 case ANEG_STATE_IDLE_DETECT:
3138 if (ap->ability_match != 0 &&
3139 ap->rxconfig == 0) {
3140 ap->state = ANEG_STATE_AN_ENABLE;
3141 break;
3142 }
3143 delta = ap->cur_time - ap->link_time;
3144 if (delta > ANEG_STATE_SETTLE_TIME) {
3145 /* XXX another gem from the Broadcom driver :( */
3146 ap->state = ANEG_STATE_LINK_OK;
3147 }
3148 break;
3149
3150 case ANEG_STATE_LINK_OK:
3151 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3152 ret = ANEG_DONE;
3153 break;
3154
3155 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3156 /* ??? unimplemented */
3157 break;
3158
3159 case ANEG_STATE_NEXT_PAGE_WAIT:
3160 /* ??? unimplemented */
3161 break;
3162
3163 default:
3164 ret = ANEG_FAILED;
3165 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
3168 return ret;
3169}
3170
Matt Carlson5be73b42007-12-20 20:09:29 -08003171static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172{
3173 int res = 0;
3174 struct tg3_fiber_aneginfo aninfo;
3175 int status = ANEG_FAILED;
3176 unsigned int tick;
3177 u32 tmp;
3178
3179 tw32_f(MAC_TX_AUTO_NEG, 0);
3180
3181 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3182 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3183 udelay(40);
3184
3185 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3186 udelay(40);
3187
3188 memset(&aninfo, 0, sizeof(aninfo));
3189 aninfo.flags |= MR_AN_ENABLE;
3190 aninfo.state = ANEG_STATE_UNKNOWN;
3191 aninfo.cur_time = 0;
3192 tick = 0;
3193 while (++tick < 195000) {
3194 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3195 if (status == ANEG_DONE || status == ANEG_FAILED)
3196 break;
3197
3198 udelay(1);
3199 }
3200
3201 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3202 tw32_f(MAC_MODE, tp->mac_mode);
3203 udelay(40);
3204
Matt Carlson5be73b42007-12-20 20:09:29 -08003205 *txflags = aninfo.txconfig;
3206 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
3208 if (status == ANEG_DONE &&
3209 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3210 MR_LP_ADV_FULL_DUPLEX)))
3211 res = 1;
3212
3213 return res;
3214}
3215
3216static void tg3_init_bcm8002(struct tg3 *tp)
3217{
3218 u32 mac_status = tr32(MAC_STATUS);
3219 int i;
3220
3221 /* Reset when initting first time or we have a link. */
3222 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3223 !(mac_status & MAC_STATUS_PCS_SYNCED))
3224 return;
3225
3226 /* Set PLL lock range. */
3227 tg3_writephy(tp, 0x16, 0x8007);
3228
3229 /* SW reset */
3230 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3231
3232 /* Wait for reset to complete. */
3233 /* XXX schedule_timeout() ... */
3234 for (i = 0; i < 500; i++)
3235 udelay(10);
3236
3237 /* Config mode; select PMA/Ch 1 regs. */
3238 tg3_writephy(tp, 0x10, 0x8411);
3239
3240 /* Enable auto-lock and comdet, select txclk for tx. */
3241 tg3_writephy(tp, 0x11, 0x0a10);
3242
3243 tg3_writephy(tp, 0x18, 0x00a0);
3244 tg3_writephy(tp, 0x16, 0x41ff);
3245
3246 /* Assert and deassert POR. */
3247 tg3_writephy(tp, 0x13, 0x0400);
3248 udelay(40);
3249 tg3_writephy(tp, 0x13, 0x0000);
3250
3251 tg3_writephy(tp, 0x11, 0x0a50);
3252 udelay(40);
3253 tg3_writephy(tp, 0x11, 0x0a10);
3254
3255 /* Wait for signal to stabilize */
3256 /* XXX schedule_timeout() ... */
3257 for (i = 0; i < 15000; i++)
3258 udelay(10);
3259
3260 /* Deselect the channel register so we can read the PHYID
3261 * later.
3262 */
3263 tg3_writephy(tp, 0x10, 0x8011);
3264}
3265
3266static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3267{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003268 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269 u32 sg_dig_ctrl, sg_dig_status;
3270 u32 serdes_cfg, expected_sg_dig_ctrl;
3271 int workaround, port_a;
3272 int current_link_up;
3273
3274 serdes_cfg = 0;
3275 expected_sg_dig_ctrl = 0;
3276 workaround = 0;
3277 port_a = 1;
3278 current_link_up = 0;
3279
3280 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3281 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3282 workaround = 1;
3283 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3284 port_a = 0;
3285
3286 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3287 /* preserve bits 20-23 for voltage regulator */
3288 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3289 }
3290
3291 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3292
3293 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003294 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 if (workaround) {
3296 u32 val = serdes_cfg;
3297
3298 if (port_a)
3299 val |= 0xc010000;
3300 else
3301 val |= 0x4010000;
3302 tw32_f(MAC_SERDES_CFG, val);
3303 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003304
3305 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 }
3307 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3308 tg3_setup_flow_control(tp, 0, 0);
3309 current_link_up = 1;
3310 }
3311 goto out;
3312 }
3313
3314 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003315 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316
Matt Carlson82cd3d12007-12-20 20:09:00 -08003317 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3318 if (flowctrl & ADVERTISE_1000XPAUSE)
3319 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3320 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3321 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322
3323 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003324 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3325 tp->serdes_counter &&
3326 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3327 MAC_STATUS_RCVD_CFG)) ==
3328 MAC_STATUS_PCS_SYNCED)) {
3329 tp->serdes_counter--;
3330 current_link_up = 1;
3331 goto out;
3332 }
3333restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 if (workaround)
3335 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003336 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 udelay(5);
3338 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3339
Michael Chan3d3ebe72006-09-27 15:59:15 -07003340 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3341 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3343 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003344 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 mac_status = tr32(MAC_STATUS);
3346
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003347 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003349 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350
Matt Carlson82cd3d12007-12-20 20:09:00 -08003351 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3352 local_adv |= ADVERTISE_1000XPAUSE;
3353 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3354 local_adv |= ADVERTISE_1000XPSE_ASYM;
3355
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003356 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003357 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003358 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003359 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360
3361 tg3_setup_flow_control(tp, local_adv, remote_adv);
3362 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003363 tp->serdes_counter = 0;
3364 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003365 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003366 if (tp->serdes_counter)
3367 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 else {
3369 if (workaround) {
3370 u32 val = serdes_cfg;
3371
3372 if (port_a)
3373 val |= 0xc010000;
3374 else
3375 val |= 0x4010000;
3376
3377 tw32_f(MAC_SERDES_CFG, val);
3378 }
3379
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003380 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 udelay(40);
3382
3383 /* Link parallel detection - link is up */
3384 /* only if we have PCS_SYNC and not */
3385 /* receiving config code words */
3386 mac_status = tr32(MAC_STATUS);
3387 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3388 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3389 tg3_setup_flow_control(tp, 0, 0);
3390 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003391 tp->tg3_flags2 |=
3392 TG3_FLG2_PARALLEL_DETECT;
3393 tp->serdes_counter =
3394 SERDES_PARALLEL_DET_TIMEOUT;
3395 } else
3396 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 }
3398 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003399 } else {
3400 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3401 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 }
3403
3404out:
3405 return current_link_up;
3406}
3407
3408static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3409{
3410 int current_link_up = 0;
3411
Michael Chan5cf64b82007-05-05 12:11:21 -07003412 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414
3415 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003416 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003418
Matt Carlson5be73b42007-12-20 20:09:29 -08003419 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3420 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421
Matt Carlson5be73b42007-12-20 20:09:29 -08003422 if (txflags & ANEG_CFG_PS1)
3423 local_adv |= ADVERTISE_1000XPAUSE;
3424 if (txflags & ANEG_CFG_PS2)
3425 local_adv |= ADVERTISE_1000XPSE_ASYM;
3426
3427 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3428 remote_adv |= LPA_1000XPAUSE;
3429 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3430 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431
3432 tg3_setup_flow_control(tp, local_adv, remote_adv);
3433
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 current_link_up = 1;
3435 }
3436 for (i = 0; i < 30; i++) {
3437 udelay(20);
3438 tw32_f(MAC_STATUS,
3439 (MAC_STATUS_SYNC_CHANGED |
3440 MAC_STATUS_CFG_CHANGED));
3441 udelay(40);
3442 if ((tr32(MAC_STATUS) &
3443 (MAC_STATUS_SYNC_CHANGED |
3444 MAC_STATUS_CFG_CHANGED)) == 0)
3445 break;
3446 }
3447
3448 mac_status = tr32(MAC_STATUS);
3449 if (current_link_up == 0 &&
3450 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3451 !(mac_status & MAC_STATUS_RCVD_CFG))
3452 current_link_up = 1;
3453 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003454 tg3_setup_flow_control(tp, 0, 0);
3455
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456 /* Forcing 1000FD link up. */
3457 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458
3459 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3460 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003461
3462 tw32_f(MAC_MODE, tp->mac_mode);
3463 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464 }
3465
3466out:
3467 return current_link_up;
3468}
3469
3470static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3471{
3472 u32 orig_pause_cfg;
3473 u16 orig_active_speed;
3474 u8 orig_active_duplex;
3475 u32 mac_status;
3476 int current_link_up;
3477 int i;
3478
Matt Carlson8d018622007-12-20 20:05:44 -08003479 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480 orig_active_speed = tp->link_config.active_speed;
3481 orig_active_duplex = tp->link_config.active_duplex;
3482
3483 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3484 netif_carrier_ok(tp->dev) &&
3485 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3486 mac_status = tr32(MAC_STATUS);
3487 mac_status &= (MAC_STATUS_PCS_SYNCED |
3488 MAC_STATUS_SIGNAL_DET |
3489 MAC_STATUS_CFG_CHANGED |
3490 MAC_STATUS_RCVD_CFG);
3491 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3492 MAC_STATUS_SIGNAL_DET)) {
3493 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3494 MAC_STATUS_CFG_CHANGED));
3495 return 0;
3496 }
3497 }
3498
3499 tw32_f(MAC_TX_AUTO_NEG, 0);
3500
3501 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3502 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3503 tw32_f(MAC_MODE, tp->mac_mode);
3504 udelay(40);
3505
3506 if (tp->phy_id == PHY_ID_BCM8002)
3507 tg3_init_bcm8002(tp);
3508
3509 /* Enable link change event even when serdes polling. */
3510 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3511 udelay(40);
3512
3513 current_link_up = 0;
3514 mac_status = tr32(MAC_STATUS);
3515
3516 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3517 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3518 else
3519 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3520
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521 tp->hw_status->status =
3522 (SD_STATUS_UPDATED |
3523 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3524
3525 for (i = 0; i < 100; i++) {
3526 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3527 MAC_STATUS_CFG_CHANGED));
3528 udelay(5);
3529 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003530 MAC_STATUS_CFG_CHANGED |
3531 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 break;
3533 }
3534
3535 mac_status = tr32(MAC_STATUS);
3536 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3537 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003538 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3539 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 tw32_f(MAC_MODE, (tp->mac_mode |
3541 MAC_MODE_SEND_CONFIGS));
3542 udelay(1);
3543 tw32_f(MAC_MODE, tp->mac_mode);
3544 }
3545 }
3546
3547 if (current_link_up == 1) {
3548 tp->link_config.active_speed = SPEED_1000;
3549 tp->link_config.active_duplex = DUPLEX_FULL;
3550 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3551 LED_CTRL_LNKLED_OVERRIDE |
3552 LED_CTRL_1000MBPS_ON));
3553 } else {
3554 tp->link_config.active_speed = SPEED_INVALID;
3555 tp->link_config.active_duplex = DUPLEX_INVALID;
3556 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3557 LED_CTRL_LNKLED_OVERRIDE |
3558 LED_CTRL_TRAFFIC_OVERRIDE));
3559 }
3560
3561 if (current_link_up != netif_carrier_ok(tp->dev)) {
3562 if (current_link_up)
3563 netif_carrier_on(tp->dev);
3564 else
3565 netif_carrier_off(tp->dev);
3566 tg3_link_report(tp);
3567 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003568 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 if (orig_pause_cfg != now_pause_cfg ||
3570 orig_active_speed != tp->link_config.active_speed ||
3571 orig_active_duplex != tp->link_config.active_duplex)
3572 tg3_link_report(tp);
3573 }
3574
3575 return 0;
3576}
3577
Michael Chan747e8f82005-07-25 12:33:22 -07003578static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3579{
3580 int current_link_up, err = 0;
3581 u32 bmsr, bmcr;
3582 u16 current_speed;
3583 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003584 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003585
3586 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3587 tw32_f(MAC_MODE, tp->mac_mode);
3588 udelay(40);
3589
3590 tw32(MAC_EVENT, 0);
3591
3592 tw32_f(MAC_STATUS,
3593 (MAC_STATUS_SYNC_CHANGED |
3594 MAC_STATUS_CFG_CHANGED |
3595 MAC_STATUS_MI_COMPLETION |
3596 MAC_STATUS_LNKSTATE_CHANGED));
3597 udelay(40);
3598
3599 if (force_reset)
3600 tg3_phy_reset(tp);
3601
3602 current_link_up = 0;
3603 current_speed = SPEED_INVALID;
3604 current_duplex = DUPLEX_INVALID;
3605
3606 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3607 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003608 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3609 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3610 bmsr |= BMSR_LSTATUS;
3611 else
3612 bmsr &= ~BMSR_LSTATUS;
3613 }
Michael Chan747e8f82005-07-25 12:33:22 -07003614
3615 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3616
3617 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003618 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003619 /* do nothing, just check for link up at the end */
3620 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3621 u32 adv, new_adv;
3622
3623 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3624 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3625 ADVERTISE_1000XPAUSE |
3626 ADVERTISE_1000XPSE_ASYM |
3627 ADVERTISE_SLCT);
3628
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003629 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003630
3631 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3632 new_adv |= ADVERTISE_1000XHALF;
3633 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3634 new_adv |= ADVERTISE_1000XFULL;
3635
3636 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3637 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3638 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3639 tg3_writephy(tp, MII_BMCR, bmcr);
3640
3641 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003642 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003643 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3644
3645 return err;
3646 }
3647 } else {
3648 u32 new_bmcr;
3649
3650 bmcr &= ~BMCR_SPEED1000;
3651 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3652
3653 if (tp->link_config.duplex == DUPLEX_FULL)
3654 new_bmcr |= BMCR_FULLDPLX;
3655
3656 if (new_bmcr != bmcr) {
3657 /* BMCR_SPEED1000 is a reserved bit that needs
3658 * to be set on write.
3659 */
3660 new_bmcr |= BMCR_SPEED1000;
3661
3662 /* Force a linkdown */
3663 if (netif_carrier_ok(tp->dev)) {
3664 u32 adv;
3665
3666 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3667 adv &= ~(ADVERTISE_1000XFULL |
3668 ADVERTISE_1000XHALF |
3669 ADVERTISE_SLCT);
3670 tg3_writephy(tp, MII_ADVERTISE, adv);
3671 tg3_writephy(tp, MII_BMCR, bmcr |
3672 BMCR_ANRESTART |
3673 BMCR_ANENABLE);
3674 udelay(10);
3675 netif_carrier_off(tp->dev);
3676 }
3677 tg3_writephy(tp, MII_BMCR, new_bmcr);
3678 bmcr = new_bmcr;
3679 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3680 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003681 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3682 ASIC_REV_5714) {
3683 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3684 bmsr |= BMSR_LSTATUS;
3685 else
3686 bmsr &= ~BMSR_LSTATUS;
3687 }
Michael Chan747e8f82005-07-25 12:33:22 -07003688 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3689 }
3690 }
3691
3692 if (bmsr & BMSR_LSTATUS) {
3693 current_speed = SPEED_1000;
3694 current_link_up = 1;
3695 if (bmcr & BMCR_FULLDPLX)
3696 current_duplex = DUPLEX_FULL;
3697 else
3698 current_duplex = DUPLEX_HALF;
3699
Matt Carlsonef167e22007-12-20 20:10:01 -08003700 local_adv = 0;
3701 remote_adv = 0;
3702
Michael Chan747e8f82005-07-25 12:33:22 -07003703 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003704 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003705
3706 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3707 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3708 common = local_adv & remote_adv;
3709 if (common & (ADVERTISE_1000XHALF |
3710 ADVERTISE_1000XFULL)) {
3711 if (common & ADVERTISE_1000XFULL)
3712 current_duplex = DUPLEX_FULL;
3713 else
3714 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003715 }
3716 else
3717 current_link_up = 0;
3718 }
3719 }
3720
Matt Carlsonef167e22007-12-20 20:10:01 -08003721 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3722 tg3_setup_flow_control(tp, local_adv, remote_adv);
3723
Michael Chan747e8f82005-07-25 12:33:22 -07003724 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3725 if (tp->link_config.active_duplex == DUPLEX_HALF)
3726 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3727
3728 tw32_f(MAC_MODE, tp->mac_mode);
3729 udelay(40);
3730
3731 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3732
3733 tp->link_config.active_speed = current_speed;
3734 tp->link_config.active_duplex = current_duplex;
3735
3736 if (current_link_up != netif_carrier_ok(tp->dev)) {
3737 if (current_link_up)
3738 netif_carrier_on(tp->dev);
3739 else {
3740 netif_carrier_off(tp->dev);
3741 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3742 }
3743 tg3_link_report(tp);
3744 }
3745 return err;
3746}
3747
3748static void tg3_serdes_parallel_detect(struct tg3 *tp)
3749{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003750 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003751 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003752 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003753 return;
3754 }
3755 if (!netif_carrier_ok(tp->dev) &&
3756 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3757 u32 bmcr;
3758
3759 tg3_readphy(tp, MII_BMCR, &bmcr);
3760 if (bmcr & BMCR_ANENABLE) {
3761 u32 phy1, phy2;
3762
3763 /* Select shadow register 0x1f */
3764 tg3_writephy(tp, 0x1c, 0x7c00);
3765 tg3_readphy(tp, 0x1c, &phy1);
3766
3767 /* Select expansion interrupt status register */
3768 tg3_writephy(tp, 0x17, 0x0f01);
3769 tg3_readphy(tp, 0x15, &phy2);
3770 tg3_readphy(tp, 0x15, &phy2);
3771
3772 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3773 /* We have signal detect and not receiving
3774 * config code words, link is up by parallel
3775 * detection.
3776 */
3777
3778 bmcr &= ~BMCR_ANENABLE;
3779 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3780 tg3_writephy(tp, MII_BMCR, bmcr);
3781 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3782 }
3783 }
3784 }
3785 else if (netif_carrier_ok(tp->dev) &&
3786 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3787 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3788 u32 phy2;
3789
3790 /* Select expansion interrupt status register */
3791 tg3_writephy(tp, 0x17, 0x0f01);
3792 tg3_readphy(tp, 0x15, &phy2);
3793 if (phy2 & 0x20) {
3794 u32 bmcr;
3795
3796 /* Config code words received, turn on autoneg. */
3797 tg3_readphy(tp, MII_BMCR, &bmcr);
3798 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3799
3800 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3801
3802 }
3803 }
3804}
3805
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3807{
3808 int err;
3809
3810 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3811 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003812 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3813 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 } else {
3815 err = tg3_setup_copper_phy(tp, force_reset);
3816 }
3817
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003818 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003819 u32 val, scale;
3820
3821 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3822 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3823 scale = 65;
3824 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3825 scale = 6;
3826 else
3827 scale = 12;
3828
3829 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3830 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3831 tw32(GRC_MISC_CFG, val);
3832 }
3833
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 if (tp->link_config.active_speed == SPEED_1000 &&
3835 tp->link_config.active_duplex == DUPLEX_HALF)
3836 tw32(MAC_TX_LENGTHS,
3837 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3838 (6 << TX_LENGTHS_IPG_SHIFT) |
3839 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3840 else
3841 tw32(MAC_TX_LENGTHS,
3842 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3843 (6 << TX_LENGTHS_IPG_SHIFT) |
3844 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3845
3846 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3847 if (netif_carrier_ok(tp->dev)) {
3848 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003849 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850 } else {
3851 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3852 }
3853 }
3854
Matt Carlson8ed5d972007-05-07 00:25:49 -07003855 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3856 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3857 if (!netif_carrier_ok(tp->dev))
3858 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3859 tp->pwrmgmt_thresh;
3860 else
3861 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3862 tw32(PCIE_PWR_MGMT_THRESH, val);
3863 }
3864
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865 return err;
3866}
3867
Michael Chandf3e6542006-05-26 17:48:07 -07003868/* This is called whenever we suspect that the system chipset is re-
3869 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3870 * is bogus tx completions. We try to recover by setting the
3871 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3872 * in the workqueue.
3873 */
3874static void tg3_tx_recover(struct tg3 *tp)
3875{
3876 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3877 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3878
3879 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3880 "mapped I/O cycles to the network device, attempting to "
3881 "recover. Please report the problem to the driver maintainer "
3882 "and include system chipset information.\n", tp->dev->name);
3883
3884 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003885 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003886 spin_unlock(&tp->lock);
3887}
3888
Michael Chan1b2a7202006-08-07 21:46:02 -07003889static inline u32 tg3_tx_avail(struct tg3 *tp)
3890{
3891 smp_mb();
3892 return (tp->tx_pending -
3893 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3894}
3895
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896/* Tigon3 never reports partial packet sends. So we do not
3897 * need special logic to handle SKBs that have not had all
3898 * of their frags sent yet, like SunGEM does.
3899 */
3900static void tg3_tx(struct tg3 *tp)
3901{
3902 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3903 u32 sw_idx = tp->tx_cons;
3904
3905 while (sw_idx != hw_idx) {
3906 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3907 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003908 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909
Michael Chandf3e6542006-05-26 17:48:07 -07003910 if (unlikely(skb == NULL)) {
3911 tg3_tx_recover(tp);
3912 return;
3913 }
3914
David S. Miller90079ce2008-09-11 04:52:51 -07003915 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916
3917 ri->skb = NULL;
3918
3919 sw_idx = NEXT_TX(sw_idx);
3920
3921 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003923 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3924 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925 sw_idx = NEXT_TX(sw_idx);
3926 }
3927
David S. Millerf47c11e2005-06-24 20:18:35 -07003928 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003929
3930 if (unlikely(tx_bug)) {
3931 tg3_tx_recover(tp);
3932 return;
3933 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 }
3935
3936 tp->tx_cons = sw_idx;
3937
Michael Chan1b2a7202006-08-07 21:46:02 -07003938 /* Need to make the tx_cons update visible to tg3_start_xmit()
3939 * before checking for netif_queue_stopped(). Without the
3940 * memory barrier, there is a small possibility that tg3_start_xmit()
3941 * will miss it and cause the queue to be stopped forever.
3942 */
3943 smp_mb();
3944
3945 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003946 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07003947 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003948 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003949 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07003950 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07003951 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003952 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953}
3954
3955/* Returns size of skb allocated or < 0 on error.
3956 *
3957 * We only need to fill in the address because the other members
3958 * of the RX descriptor are invariant, see tg3_init_rings.
3959 *
3960 * Note the purposeful assymetry of cpu vs. chip accesses. For
3961 * posting buffers we only dirty the first cache line of the RX
3962 * descriptor (containing the address). Whereas for the RX status
3963 * buffers the cpu only reads the last cacheline of the RX descriptor
3964 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3965 */
3966static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3967 int src_idx, u32 dest_idx_unmasked)
3968{
3969 struct tg3_rx_buffer_desc *desc;
3970 struct ring_info *map, *src_map;
3971 struct sk_buff *skb;
3972 dma_addr_t mapping;
3973 int skb_size, dest_idx;
3974
3975 src_map = NULL;
3976 switch (opaque_key) {
3977 case RXD_OPAQUE_RING_STD:
3978 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3979 desc = &tp->rx_std[dest_idx];
3980 map = &tp->rx_std_buffers[dest_idx];
3981 if (src_idx >= 0)
3982 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07003983 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 break;
3985
3986 case RXD_OPAQUE_RING_JUMBO:
3987 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3988 desc = &tp->rx_jumbo[dest_idx];
3989 map = &tp->rx_jumbo_buffers[dest_idx];
3990 if (src_idx >= 0)
3991 src_map = &tp->rx_jumbo_buffers[src_idx];
3992 skb_size = RX_JUMBO_PKT_BUF_SZ;
3993 break;
3994
3995 default:
3996 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003997 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998
3999 /* Do not overwrite any of the map or rp information
4000 * until we are sure we can commit to a new buffer.
4001 *
4002 * Callers depend upon this behavior and assume that
4003 * we leave everything unchanged if we fail.
4004 */
David S. Millera20e9c62006-07-31 22:38:16 -07004005 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 if (skb == NULL)
4007 return -ENOMEM;
4008
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 skb_reserve(skb, tp->rx_offset);
4010
4011 mapping = pci_map_single(tp->pdev, skb->data,
4012 skb_size - tp->rx_offset,
4013 PCI_DMA_FROMDEVICE);
4014
4015 map->skb = skb;
4016 pci_unmap_addr_set(map, mapping, mapping);
4017
4018 if (src_map != NULL)
4019 src_map->skb = NULL;
4020
4021 desc->addr_hi = ((u64)mapping >> 32);
4022 desc->addr_lo = ((u64)mapping & 0xffffffff);
4023
4024 return skb_size;
4025}
4026
4027/* We only need to move over in the address because the other
4028 * members of the RX descriptor are invariant. See notes above
4029 * tg3_alloc_rx_skb for full details.
4030 */
4031static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4032 int src_idx, u32 dest_idx_unmasked)
4033{
4034 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4035 struct ring_info *src_map, *dest_map;
4036 int dest_idx;
4037
4038 switch (opaque_key) {
4039 case RXD_OPAQUE_RING_STD:
4040 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4041 dest_desc = &tp->rx_std[dest_idx];
4042 dest_map = &tp->rx_std_buffers[dest_idx];
4043 src_desc = &tp->rx_std[src_idx];
4044 src_map = &tp->rx_std_buffers[src_idx];
4045 break;
4046
4047 case RXD_OPAQUE_RING_JUMBO:
4048 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4049 dest_desc = &tp->rx_jumbo[dest_idx];
4050 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4051 src_desc = &tp->rx_jumbo[src_idx];
4052 src_map = &tp->rx_jumbo_buffers[src_idx];
4053 break;
4054
4055 default:
4056 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004057 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
4059 dest_map->skb = src_map->skb;
4060 pci_unmap_addr_set(dest_map, mapping,
4061 pci_unmap_addr(src_map, mapping));
4062 dest_desc->addr_hi = src_desc->addr_hi;
4063 dest_desc->addr_lo = src_desc->addr_lo;
4064
4065 src_map->skb = NULL;
4066}
4067
4068#if TG3_VLAN_TAG_USED
4069static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4070{
4071 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4072}
4073#endif
4074
4075/* The RX ring scheme is composed of multiple rings which post fresh
4076 * buffers to the chip, and one special ring the chip uses to report
4077 * status back to the host.
4078 *
4079 * The special ring reports the status of received packets to the
4080 * host. The chip does not write into the original descriptor the
4081 * RX buffer was obtained from. The chip simply takes the original
4082 * descriptor as provided by the host, updates the status and length
4083 * field, then writes this into the next status ring entry.
4084 *
4085 * Each ring the host uses to post buffers to the chip is described
4086 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4087 * it is first placed into the on-chip ram. When the packet's length
4088 * is known, it walks down the TG3_BDINFO entries to select the ring.
4089 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4090 * which is within the range of the new packet's length is chosen.
4091 *
4092 * The "separate ring for rx status" scheme may sound queer, but it makes
4093 * sense from a cache coherency perspective. If only the host writes
4094 * to the buffer post rings, and only the chip writes to the rx status
4095 * rings, then cache lines never move beyond shared-modified state.
4096 * If both the host and chip were to write into the same ring, cache line
4097 * eviction could occur since both entities want it in an exclusive state.
4098 */
4099static int tg3_rx(struct tg3 *tp, int budget)
4100{
Michael Chanf92905d2006-06-29 20:14:29 -07004101 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004102 u32 sw_idx = tp->rx_rcb_ptr;
4103 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 int received;
4105
4106 hw_idx = tp->hw_status->idx[0].rx_producer;
4107 /*
4108 * We need to order the read of hw_idx and the read of
4109 * the opaque cookie.
4110 */
4111 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112 work_mask = 0;
4113 received = 0;
4114 while (sw_idx != hw_idx && budget > 0) {
4115 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4116 unsigned int len;
4117 struct sk_buff *skb;
4118 dma_addr_t dma_addr;
4119 u32 opaque_key, desc_idx, *post_ptr;
4120
4121 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4122 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4123 if (opaque_key == RXD_OPAQUE_RING_STD) {
4124 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4125 mapping);
4126 skb = tp->rx_std_buffers[desc_idx].skb;
4127 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004128 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4130 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4131 mapping);
4132 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4133 post_ptr = &tp->rx_jumbo_ptr;
4134 }
4135 else {
4136 goto next_pkt_nopost;
4137 }
4138
4139 work_mask |= opaque_key;
4140
4141 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4142 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4143 drop_it:
4144 tg3_recycle_rx(tp, opaque_key,
4145 desc_idx, *post_ptr);
4146 drop_it_no_recycle:
4147 /* Other statistics kept track of by card. */
4148 tp->net_stats.rx_dropped++;
4149 goto next_pkt;
4150 }
4151
4152 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4153
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004154 if (len > RX_COPY_THRESHOLD
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155 && tp->rx_offset == 2
4156 /* rx_offset != 2 iff this is a 5701 card running
4157 * in PCI-X mode [see tg3_get_invariants()] */
4158 ) {
4159 int skb_size;
4160
4161 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4162 desc_idx, *post_ptr);
4163 if (skb_size < 0)
4164 goto drop_it;
4165
4166 pci_unmap_single(tp->pdev, dma_addr,
4167 skb_size - tp->rx_offset,
4168 PCI_DMA_FROMDEVICE);
4169
4170 skb_put(skb, len);
4171 } else {
4172 struct sk_buff *copy_skb;
4173
4174 tg3_recycle_rx(tp, opaque_key,
4175 desc_idx, *post_ptr);
4176
David S. Millera20e9c62006-07-31 22:38:16 -07004177 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178 if (copy_skb == NULL)
4179 goto drop_it_no_recycle;
4180
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 skb_reserve(copy_skb, 2);
4182 skb_put(copy_skb, len);
4183 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004184 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4186
4187 /* We'll reuse the original ring buffer. */
4188 skb = copy_skb;
4189 }
4190
4191 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4192 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4193 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4194 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4195 skb->ip_summed = CHECKSUM_UNNECESSARY;
4196 else
4197 skb->ip_summed = CHECKSUM_NONE;
4198
4199 skb->protocol = eth_type_trans(skb, tp->dev);
4200#if TG3_VLAN_TAG_USED
4201 if (tp->vlgrp != NULL &&
4202 desc->type_flags & RXD_FLAG_VLAN) {
4203 tg3_vlan_rx(tp, skb,
4204 desc->err_vlan & RXD_VLAN_MASK);
4205 } else
4206#endif
4207 netif_receive_skb(skb);
4208
4209 tp->dev->last_rx = jiffies;
4210 received++;
4211 budget--;
4212
4213next_pkt:
4214 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004215
4216 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4217 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4218
4219 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4220 TG3_64BIT_REG_LOW, idx);
4221 work_mask &= ~RXD_OPAQUE_RING_STD;
4222 rx_std_posted = 0;
4223 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004225 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004226 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004227
4228 /* Refresh hw_idx to see if there is new work */
4229 if (sw_idx == hw_idx) {
4230 hw_idx = tp->hw_status->idx[0].rx_producer;
4231 rmb();
4232 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233 }
4234
4235 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004236 tp->rx_rcb_ptr = sw_idx;
4237 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238
4239 /* Refill RX ring(s). */
4240 if (work_mask & RXD_OPAQUE_RING_STD) {
4241 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4242 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4243 sw_idx);
4244 }
4245 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4246 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4247 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4248 sw_idx);
4249 }
4250 mmiowb();
4251
4252 return received;
4253}
4254
David S. Miller6f535762007-10-11 18:08:29 -07004255static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259 /* handle link change and other phy events */
4260 if (!(tp->tg3_flags &
4261 (TG3_FLAG_USE_LINKCHG_REG |
4262 TG3_FLAG_POLL_SERDES))) {
4263 if (sblk->status & SD_STATUS_LINK_CHG) {
4264 sblk->status = SD_STATUS_UPDATED |
4265 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004266 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004267 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4268 tw32_f(MAC_STATUS,
4269 (MAC_STATUS_SYNC_CHANGED |
4270 MAC_STATUS_CFG_CHANGED |
4271 MAC_STATUS_MI_COMPLETION |
4272 MAC_STATUS_LNKSTATE_CHANGED));
4273 udelay(40);
4274 } else
4275 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004276 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277 }
4278 }
4279
4280 /* run TX completion thread */
4281 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004283 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004284 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 }
4286
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287 /* run RX thread, within the bounds set by NAPI.
4288 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004289 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004291 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004292 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
David S. Miller6f535762007-10-11 18:08:29 -07004294 return work_done;
4295}
David S. Millerf7383c22005-05-18 22:50:53 -07004296
David S. Miller6f535762007-10-11 18:08:29 -07004297static int tg3_poll(struct napi_struct *napi, int budget)
4298{
4299 struct tg3 *tp = container_of(napi, struct tg3, napi);
4300 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004301 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004302
4303 while (1) {
4304 work_done = tg3_poll_work(tp, work_done, budget);
4305
4306 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4307 goto tx_recovery;
4308
4309 if (unlikely(work_done >= budget))
4310 break;
4311
Michael Chan4fd7ab52007-10-12 01:39:50 -07004312 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4313 /* tp->last_tag is used in tg3_restart_ints() below
4314 * to tell the hw how much work has been processed,
4315 * so we must read it before checking for more work.
4316 */
4317 tp->last_tag = sblk->status_tag;
4318 rmb();
4319 } else
4320 sblk->status &= ~SD_STATUS_UPDATED;
4321
David S. Miller6f535762007-10-11 18:08:29 -07004322 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004323 netif_rx_complete(tp->dev, napi);
4324 tg3_restart_ints(tp);
4325 break;
4326 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327 }
4328
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004329 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004330
4331tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004332 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004333 netif_rx_complete(tp->dev, napi);
4334 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004335 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336}
4337
David S. Millerf47c11e2005-06-24 20:18:35 -07004338static void tg3_irq_quiesce(struct tg3 *tp)
4339{
4340 BUG_ON(tp->irq_sync);
4341
4342 tp->irq_sync = 1;
4343 smp_mb();
4344
4345 synchronize_irq(tp->pdev->irq);
4346}
4347
4348static inline int tg3_irq_sync(struct tg3 *tp)
4349{
4350 return tp->irq_sync;
4351}
4352
4353/* Fully shutdown all tg3 driver activity elsewhere in the system.
4354 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4355 * with as well. Most of the time, this is not necessary except when
4356 * shutting down the device.
4357 */
4358static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4359{
Michael Chan46966542007-07-11 19:47:19 -07004360 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004361 if (irq_sync)
4362 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004363}
4364
4365static inline void tg3_full_unlock(struct tg3 *tp)
4366{
David S. Millerf47c11e2005-06-24 20:18:35 -07004367 spin_unlock_bh(&tp->lock);
4368}
4369
Michael Chanfcfa0a32006-03-20 22:28:41 -08004370/* One-shot MSI handler - Chip automatically disables interrupt
4371 * after sending MSI so driver doesn't have to do it.
4372 */
David Howells7d12e782006-10-05 14:55:46 +01004373static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004374{
4375 struct net_device *dev = dev_id;
4376 struct tg3 *tp = netdev_priv(dev);
4377
4378 prefetch(tp->hw_status);
4379 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4380
4381 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004382 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004383
4384 return IRQ_HANDLED;
4385}
4386
Michael Chan88b06bc2005-04-21 17:13:25 -07004387/* MSI ISR - No need to check for interrupt sharing and no need to
4388 * flush status block and interrupt mailbox. PCI ordering rules
4389 * guarantee that MSI will arrive after the status block.
4390 */
David Howells7d12e782006-10-05 14:55:46 +01004391static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004392{
4393 struct net_device *dev = dev_id;
4394 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004395
Michael Chan61487482005-09-05 17:53:19 -07004396 prefetch(tp->hw_status);
4397 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004398 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004399 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004400 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004401 * Writing non-zero to intr-mbox-0 additional tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004402 * NIC to stop sending us irqs, engaging "in-intr-handler"
4403 * event coalescing.
4404 */
4405 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004406 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004407 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004408
Michael Chan88b06bc2005-04-21 17:13:25 -07004409 return IRQ_RETVAL(1);
4410}
4411
David Howells7d12e782006-10-05 14:55:46 +01004412static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413{
4414 struct net_device *dev = dev_id;
4415 struct tg3 *tp = netdev_priv(dev);
4416 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417 unsigned int handled = 1;
4418
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419 /* In INTx mode, it is possible for the interrupt to arrive at
4420 * the CPU before the status block posted prior to the interrupt.
4421 * Reading the PCI State register will confirm whether the
4422 * interrupt is ours and will flush the status block.
4423 */
Michael Chand18edcb2007-03-24 20:57:11 -07004424 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4425 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4426 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4427 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004428 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004429 }
Michael Chand18edcb2007-03-24 20:57:11 -07004430 }
4431
4432 /*
4433 * Writing any value to intr-mbox-0 clears PCI INTA# and
4434 * chip-internal interrupt pending events.
4435 * Writing non-zero to intr-mbox-0 additional tells the
4436 * NIC to stop sending us irqs, engaging "in-intr-handler"
4437 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004438 *
4439 * Flush the mailbox to de-assert the IRQ immediately to prevent
4440 * spurious interrupts. The flush impacts performance but
4441 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004442 */
Michael Chanc04cb342007-05-07 00:26:15 -07004443 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004444 if (tg3_irq_sync(tp))
4445 goto out;
4446 sblk->status &= ~SD_STATUS_UPDATED;
4447 if (likely(tg3_has_work(tp))) {
4448 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004449 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004450 } else {
4451 /* No work, shared interrupt perhaps? re-enable
4452 * interrupts, and flush that PCI write
4453 */
4454 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4455 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004456 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004457out:
David S. Millerfac9b832005-05-18 22:46:34 -07004458 return IRQ_RETVAL(handled);
4459}
4460
David Howells7d12e782006-10-05 14:55:46 +01004461static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004462{
4463 struct net_device *dev = dev_id;
4464 struct tg3 *tp = netdev_priv(dev);
4465 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004466 unsigned int handled = 1;
4467
David S. Millerfac9b832005-05-18 22:46:34 -07004468 /* In INTx mode, it is possible for the interrupt to arrive at
4469 * the CPU before the status block posted prior to the interrupt.
4470 * Reading the PCI State register will confirm whether the
4471 * interrupt is ours and will flush the status block.
4472 */
Michael Chand18edcb2007-03-24 20:57:11 -07004473 if (unlikely(sblk->status_tag == tp->last_tag)) {
4474 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4475 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4476 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004477 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 }
Michael Chand18edcb2007-03-24 20:57:11 -07004479 }
4480
4481 /*
4482 * writing any value to intr-mbox-0 clears PCI INTA# and
4483 * chip-internal interrupt pending events.
4484 * writing non-zero to intr-mbox-0 additional tells the
4485 * NIC to stop sending us irqs, engaging "in-intr-handler"
4486 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004487 *
4488 * Flush the mailbox to de-assert the IRQ immediately to prevent
4489 * spurious interrupts. The flush impacts performance but
4490 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004491 */
Michael Chanc04cb342007-05-07 00:26:15 -07004492 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004493 if (tg3_irq_sync(tp))
4494 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004495 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004496 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4497 /* Update last_tag to mark that this status has been
4498 * seen. Because interrupt may be shared, we may be
4499 * racing with tg3_poll(), so only update last_tag
4500 * if tg3_poll() is not scheduled.
4501 */
4502 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004503 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004505out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 return IRQ_RETVAL(handled);
4507}
4508
Michael Chan79381092005-04-21 17:13:59 -07004509/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004510static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004511{
4512 struct net_device *dev = dev_id;
4513 struct tg3 *tp = netdev_priv(dev);
4514 struct tg3_hw_status *sblk = tp->hw_status;
4515
Michael Chanf9804dd2005-09-27 12:13:10 -07004516 if ((sblk->status & SD_STATUS_UPDATED) ||
4517 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004518 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004519 return IRQ_RETVAL(1);
4520 }
4521 return IRQ_RETVAL(0);
4522}
4523
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004524static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004525static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526
Michael Chanb9ec6c12006-07-25 16:37:27 -07004527/* Restart hardware after configuration changes, self-test, etc.
4528 * Invoked with tp->lock held.
4529 */
4530static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004531 __releases(tp->lock)
4532 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004533{
4534 int err;
4535
4536 err = tg3_init_hw(tp, reset_phy);
4537 if (err) {
4538 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4539 "aborting.\n", tp->dev->name);
4540 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4541 tg3_full_unlock(tp);
4542 del_timer_sync(&tp->timer);
4543 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004544 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004545 dev_close(tp->dev);
4546 tg3_full_lock(tp, 0);
4547 }
4548 return err;
4549}
4550
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551#ifdef CONFIG_NET_POLL_CONTROLLER
4552static void tg3_poll_controller(struct net_device *dev)
4553{
Michael Chan88b06bc2005-04-21 17:13:25 -07004554 struct tg3 *tp = netdev_priv(dev);
4555
David Howells7d12e782006-10-05 14:55:46 +01004556 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004557}
4558#endif
4559
David Howellsc4028952006-11-22 14:57:56 +00004560static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561{
David Howellsc4028952006-11-22 14:57:56 +00004562 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004563 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 unsigned int restart_timer;
4565
Michael Chan7faa0062006-02-02 17:29:28 -08004566 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004567
4568 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004569 tg3_full_unlock(tp);
4570 return;
4571 }
4572
4573 tg3_full_unlock(tp);
4574
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004575 tg3_phy_stop(tp);
4576
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577 tg3_netif_stop(tp);
4578
David S. Millerf47c11e2005-06-24 20:18:35 -07004579 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580
4581 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4582 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4583
Michael Chandf3e6542006-05-26 17:48:07 -07004584 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4585 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4586 tp->write32_rx_mbox = tg3_write_flush_reg32;
4587 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4588 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4589 }
4590
Michael Chan944d9802005-05-29 14:57:48 -07004591 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004592 err = tg3_init_hw(tp, 1);
4593 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004594 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004595
4596 tg3_netif_start(tp);
4597
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598 if (restart_timer)
4599 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004600
Michael Chanb9ec6c12006-07-25 16:37:27 -07004601out:
Michael Chan7faa0062006-02-02 17:29:28 -08004602 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004603
4604 if (!err)
4605 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606}
4607
Michael Chanb0408752007-02-13 12:18:30 -08004608static void tg3_dump_short_state(struct tg3 *tp)
4609{
4610 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4611 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4612 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4613 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4614}
4615
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616static void tg3_tx_timeout(struct net_device *dev)
4617{
4618 struct tg3 *tp = netdev_priv(dev);
4619
Michael Chanb0408752007-02-13 12:18:30 -08004620 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004621 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4622 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004623 tg3_dump_short_state(tp);
4624 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625
4626 schedule_work(&tp->reset_task);
4627}
4628
Michael Chanc58ec932005-09-17 00:46:27 -07004629/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4630static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4631{
4632 u32 base = (u32) mapping & 0xffffffff;
4633
4634 return ((base > 0xffffdcc0) &&
4635 (base + len + 8 < base));
4636}
4637
Michael Chan72f2afb2006-03-06 19:28:35 -08004638/* Test for DMA addresses > 40-bit */
4639static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4640 int len)
4641{
4642#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004643 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004644 return (((u64) mapping + len) > DMA_40BIT_MASK);
4645 return 0;
4646#else
4647 return 0;
4648#endif
4649}
4650
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4652
Michael Chan72f2afb2006-03-06 19:28:35 -08004653/* Workaround 4GB and 40-bit hardware DMA bugs. */
4654static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004655 u32 last_plus_one, u32 *start,
4656 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657{
Matt Carlson41588ba2008-04-19 18:12:33 -07004658 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004659 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004661 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662
Matt Carlson41588ba2008-04-19 18:12:33 -07004663 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4664 new_skb = skb_copy(skb, GFP_ATOMIC);
4665 else {
4666 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4667
4668 new_skb = skb_copy_expand(skb,
4669 skb_headroom(skb) + more_headroom,
4670 skb_tailroom(skb), GFP_ATOMIC);
4671 }
4672
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004674 ret = -1;
4675 } else {
4676 /* New SKB is guaranteed to be linear. */
4677 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004678 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4679 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4680
Michael Chanc58ec932005-09-17 00:46:27 -07004681 /* Make sure new skb does not cross any 4G boundaries.
4682 * Drop the packet if it does.
4683 */
David S. Miller90079ce2008-09-11 04:52:51 -07004684 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004685 if (!ret)
4686 skb_dma_unmap(&tp->pdev->dev, new_skb,
4687 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004688 ret = -1;
4689 dev_kfree_skb(new_skb);
4690 new_skb = NULL;
4691 } else {
4692 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4693 base_flags, 1 | (mss << 1));
4694 *start = NEXT_TX(entry);
4695 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 }
4697
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698 /* Now clean up the sw ring entries. */
4699 i = 0;
4700 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701 if (i == 0) {
4702 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703 } else {
4704 tp->tx_buffers[entry].skb = NULL;
4705 }
4706 entry = NEXT_TX(entry);
4707 i++;
4708 }
4709
David S. Miller90079ce2008-09-11 04:52:51 -07004710 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711 dev_kfree_skb(skb);
4712
Michael Chanc58ec932005-09-17 00:46:27 -07004713 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004714}
4715
4716static void tg3_set_txd(struct tg3 *tp, int entry,
4717 dma_addr_t mapping, int len, u32 flags,
4718 u32 mss_and_is_end)
4719{
4720 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4721 int is_end = (mss_and_is_end & 0x1);
4722 u32 mss = (mss_and_is_end >> 1);
4723 u32 vlan_tag = 0;
4724
4725 if (is_end)
4726 flags |= TXD_FLAG_END;
4727 if (flags & TXD_FLAG_VLAN) {
4728 vlan_tag = flags >> 16;
4729 flags &= 0xffff;
4730 }
4731 vlan_tag |= (mss << TXD_MSS_SHIFT);
4732
4733 txd->addr_hi = ((u64) mapping >> 32);
4734 txd->addr_lo = ((u64) mapping & 0xffffffff);
4735 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4736 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4737}
4738
Michael Chan5a6f3072006-03-20 22:28:05 -08004739/* hard_start_xmit for devices that don't have any bugs and
4740 * support TG3_FLG2_HW_TSO_2 only.
4741 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4743{
4744 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004746 struct skb_shared_info *sp;
4747 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004748
4749 len = skb_headlen(skb);
4750
Michael Chan00b70502006-06-17 21:58:45 -07004751 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004752 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004753 * interrupt. Furthermore, IRQ processing runs lockless so we have
4754 * no IRQ context deadlocks to worry about either. Rejoice!
4755 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004756 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004757 if (!netif_queue_stopped(dev)) {
4758 netif_stop_queue(dev);
4759
4760 /* This is a hard error, log it. */
4761 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4762 "queue awake!\n", dev->name);
4763 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004764 return NETDEV_TX_BUSY;
4765 }
4766
4767 entry = tp->tx_prod;
4768 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004769 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004770 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004771 int tcp_opt_len, ip_tcp_len;
4772
4773 if (skb_header_cloned(skb) &&
4774 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4775 dev_kfree_skb(skb);
4776 goto out_unlock;
4777 }
4778
Michael Chanb0026622006-07-03 19:42:14 -07004779 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4780 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4781 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004782 struct iphdr *iph = ip_hdr(skb);
4783
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004784 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004785 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004786
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004787 iph->check = 0;
4788 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004789 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4790 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004791
4792 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4793 TXD_FLAG_CPU_POST_DMA);
4794
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004795 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004796
Michael Chan5a6f3072006-03-20 22:28:05 -08004797 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004798 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004799 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004800#if TG3_VLAN_TAG_USED
4801 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4802 base_flags |= (TXD_FLAG_VLAN |
4803 (vlan_tx_tag_get(skb) << 16));
4804#endif
4805
David S. Miller90079ce2008-09-11 04:52:51 -07004806 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4807 dev_kfree_skb(skb);
4808 goto out_unlock;
4809 }
4810
4811 sp = skb_shinfo(skb);
4812
4813 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004814
4815 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004816
4817 tg3_set_txd(tp, entry, mapping, len, base_flags,
4818 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4819
4820 entry = NEXT_TX(entry);
4821
4822 /* Now loop through additional data fragments, and queue them. */
4823 if (skb_shinfo(skb)->nr_frags > 0) {
4824 unsigned int i, last;
4825
4826 last = skb_shinfo(skb)->nr_frags - 1;
4827 for (i = 0; i <= last; i++) {
4828 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4829
4830 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004831 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004832 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004833
4834 tg3_set_txd(tp, entry, mapping, len,
4835 base_flags, (i == last) | (mss << 1));
4836
4837 entry = NEXT_TX(entry);
4838 }
4839 }
4840
4841 /* Packets are ready, update Tx producer idx local and on card. */
4842 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4843
4844 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004845 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004846 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004847 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004848 netif_wake_queue(tp->dev);
4849 }
4850
4851out_unlock:
4852 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004853
4854 dev->trans_start = jiffies;
4855
4856 return NETDEV_TX_OK;
4857}
4858
Michael Chan52c0fd82006-06-29 20:15:54 -07004859static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4860
4861/* Use GSO to workaround a rare TSO bug that may be triggered when the
4862 * TSO header is greater than 80 bytes.
4863 */
4864static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4865{
4866 struct sk_buff *segs, *nskb;
4867
4868 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004869 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004870 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004871 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4872 return NETDEV_TX_BUSY;
4873
4874 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004875 }
4876
4877 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004878 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004879 goto tg3_tso_bug_end;
4880
4881 do {
4882 nskb = segs;
4883 segs = segs->next;
4884 nskb->next = NULL;
4885 tg3_start_xmit_dma_bug(nskb, tp->dev);
4886 } while (segs);
4887
4888tg3_tso_bug_end:
4889 dev_kfree_skb(skb);
4890
4891 return NETDEV_TX_OK;
4892}
Michael Chan52c0fd82006-06-29 20:15:54 -07004893
Michael Chan5a6f3072006-03-20 22:28:05 -08004894/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4895 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4896 */
4897static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4898{
4899 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004900 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004901 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004903 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904
4905 len = skb_headlen(skb);
4906
Michael Chan00b70502006-06-17 21:58:45 -07004907 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004908 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004909 * interrupt. Furthermore, IRQ processing runs lockless so we have
4910 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004911 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004912 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004913 if (!netif_queue_stopped(dev)) {
4914 netif_stop_queue(dev);
4915
4916 /* This is a hard error, log it. */
4917 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4918 "queue awake!\n", dev->name);
4919 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004920 return NETDEV_TX_BUSY;
4921 }
4922
4923 entry = tp->tx_prod;
4924 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004925 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004926 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004928 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004929 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07004930 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931
4932 if (skb_header_cloned(skb) &&
4933 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4934 dev_kfree_skb(skb);
4935 goto out_unlock;
4936 }
4937
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004938 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004939 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940
Michael Chan52c0fd82006-06-29 20:15:54 -07004941 hdr_len = ip_tcp_len + tcp_opt_len;
4942 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08004943 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07004944 return (tg3_tso_bug(tp, skb));
4945
Linus Torvalds1da177e2005-04-16 15:20:36 -07004946 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4947 TXD_FLAG_CPU_POST_DMA);
4948
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004949 iph = ip_hdr(skb);
4950 iph->check = 0;
4951 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004953 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004955 } else
4956 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4957 iph->daddr, 0,
4958 IPPROTO_TCP,
4959 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004960
4961 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4962 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004963 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 int tsflags;
4965
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004966 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967 mss |= (tsflags << 11);
4968 }
4969 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004970 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971 int tsflags;
4972
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004973 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004974 base_flags |= tsflags << 12;
4975 }
4976 }
4977 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978#if TG3_VLAN_TAG_USED
4979 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4980 base_flags |= (TXD_FLAG_VLAN |
4981 (vlan_tx_tag_get(skb) << 16));
4982#endif
4983
David S. Miller90079ce2008-09-11 04:52:51 -07004984 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4985 dev_kfree_skb(skb);
4986 goto out_unlock;
4987 }
4988
4989 sp = skb_shinfo(skb);
4990
4991 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992
4993 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994
4995 would_hit_hwbug = 0;
4996
Matt Carlson41588ba2008-04-19 18:12:33 -07004997 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4998 would_hit_hwbug = 1;
4999 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005000 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001
5002 tg3_set_txd(tp, entry, mapping, len, base_flags,
5003 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5004
5005 entry = NEXT_TX(entry);
5006
5007 /* Now loop through additional data fragments, and queue them. */
5008 if (skb_shinfo(skb)->nr_frags > 0) {
5009 unsigned int i, last;
5010
5011 last = skb_shinfo(skb)->nr_frags - 1;
5012 for (i = 0; i <= last; i++) {
5013 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5014
5015 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005016 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017
5018 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019
Michael Chanc58ec932005-09-17 00:46:27 -07005020 if (tg3_4g_overflow_test(mapping, len))
5021 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022
Michael Chan72f2afb2006-03-06 19:28:35 -08005023 if (tg3_40bit_overflow_test(tp, mapping, len))
5024 would_hit_hwbug = 1;
5025
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5027 tg3_set_txd(tp, entry, mapping, len,
5028 base_flags, (i == last)|(mss << 1));
5029 else
5030 tg3_set_txd(tp, entry, mapping, len,
5031 base_flags, (i == last));
5032
5033 entry = NEXT_TX(entry);
5034 }
5035 }
5036
5037 if (would_hit_hwbug) {
5038 u32 last_plus_one = entry;
5039 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040
Michael Chanc58ec932005-09-17 00:46:27 -07005041 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5042 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043
5044 /* If the workaround fails due to memory/mapping
5045 * failure, silently drop this packet.
5046 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005047 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005048 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049 goto out_unlock;
5050
5051 entry = start;
5052 }
5053
5054 /* Packets are ready, update Tx producer idx local and on card. */
5055 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5056
5057 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005058 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005060 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005061 netif_wake_queue(tp->dev);
5062 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063
5064out_unlock:
5065 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066
5067 dev->trans_start = jiffies;
5068
5069 return NETDEV_TX_OK;
5070}
5071
5072static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5073 int new_mtu)
5074{
5075 dev->mtu = new_mtu;
5076
Michael Chanef7f5ec2005-07-25 12:32:25 -07005077 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005078 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005079 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5080 ethtool_op_set_tso(dev, 0);
5081 }
5082 else
5083 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5084 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005085 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005086 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005087 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005088 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005089}
5090
5091static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5092{
5093 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005094 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095
5096 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5097 return -EINVAL;
5098
5099 if (!netif_running(dev)) {
5100 /* We'll just catch it later when the
5101 * device is up'd.
5102 */
5103 tg3_set_mtu(dev, tp, new_mtu);
5104 return 0;
5105 }
5106
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005107 tg3_phy_stop(tp);
5108
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005110
5111 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112
Michael Chan944d9802005-05-29 14:57:48 -07005113 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114
5115 tg3_set_mtu(dev, tp, new_mtu);
5116
Michael Chanb9ec6c12006-07-25 16:37:27 -07005117 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005118
Michael Chanb9ec6c12006-07-25 16:37:27 -07005119 if (!err)
5120 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121
David S. Millerf47c11e2005-06-24 20:18:35 -07005122 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005124 if (!err)
5125 tg3_phy_start(tp);
5126
Michael Chanb9ec6c12006-07-25 16:37:27 -07005127 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128}
5129
5130/* Free up pending packets in all rx/tx rings.
5131 *
5132 * The chip has been shut down and the driver detached from
5133 * the networking, so no interrupts or new tx packets will
5134 * end up in the driver. tp->{tx,}lock is not held and we are not
5135 * in an interrupt context and thus may sleep.
5136 */
5137static void tg3_free_rings(struct tg3 *tp)
5138{
5139 struct ring_info *rxp;
5140 int i;
5141
5142 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5143 rxp = &tp->rx_std_buffers[i];
5144
5145 if (rxp->skb == NULL)
5146 continue;
5147 pci_unmap_single(tp->pdev,
5148 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005149 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150 PCI_DMA_FROMDEVICE);
5151 dev_kfree_skb_any(rxp->skb);
5152 rxp->skb = NULL;
5153 }
5154
5155 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5156 rxp = &tp->rx_jumbo_buffers[i];
5157
5158 if (rxp->skb == NULL)
5159 continue;
5160 pci_unmap_single(tp->pdev,
5161 pci_unmap_addr(rxp, mapping),
5162 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5163 PCI_DMA_FROMDEVICE);
5164 dev_kfree_skb_any(rxp->skb);
5165 rxp->skb = NULL;
5166 }
5167
5168 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5169 struct tx_ring_info *txp;
5170 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171
5172 txp = &tp->tx_buffers[i];
5173 skb = txp->skb;
5174
5175 if (skb == NULL) {
5176 i++;
5177 continue;
5178 }
5179
David S. Miller90079ce2008-09-11 04:52:51 -07005180 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5181
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182 txp->skb = NULL;
5183
David S. Miller90079ce2008-09-11 04:52:51 -07005184 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185
5186 dev_kfree_skb_any(skb);
5187 }
5188}
5189
5190/* Initialize tx/rx rings for packet processing.
5191 *
5192 * The chip has been shut down and the driver detached from
5193 * the networking, so no interrupts or new tx packets will
5194 * end up in the driver. tp->{tx,}lock are held and thus
5195 * we may not sleep.
5196 */
Michael Chan32d8c572006-07-25 16:38:29 -07005197static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198{
5199 u32 i;
5200
5201 /* Free up all the SKBs. */
5202 tg3_free_rings(tp);
5203
5204 /* Zero out all descriptors. */
5205 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5206 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5207 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5208 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5209
Michael Chan7e72aad2005-07-25 12:31:17 -07005210 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005211 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005212 (tp->dev->mtu > ETH_DATA_LEN))
5213 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5214
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215 /* Initialize invariants of the rings, we only set this
5216 * stuff once. This works because the card does not
5217 * write into the rx buffer posting rings.
5218 */
5219 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5220 struct tg3_rx_buffer_desc *rxd;
5221
5222 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005223 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224 << RXD_LEN_SHIFT;
5225 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5226 rxd->opaque = (RXD_OPAQUE_RING_STD |
5227 (i << RXD_OPAQUE_INDEX_SHIFT));
5228 }
5229
Michael Chan0f893dc2005-07-25 12:30:38 -07005230 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5232 struct tg3_rx_buffer_desc *rxd;
5233
5234 rxd = &tp->rx_jumbo[i];
5235 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5236 << RXD_LEN_SHIFT;
5237 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5238 RXD_FLAG_JUMBO;
5239 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5240 (i << RXD_OPAQUE_INDEX_SHIFT));
5241 }
5242 }
5243
5244 /* Now allocate fresh SKBs for each rx ring. */
5245 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005246 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5247 printk(KERN_WARNING PFX
5248 "%s: Using a smaller RX standard ring, "
5249 "only %d out of %d buffers were allocated "
5250 "successfully.\n",
5251 tp->dev->name, i, tp->rx_pending);
5252 if (i == 0)
5253 return -ENOMEM;
5254 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257 }
5258
Michael Chan0f893dc2005-07-25 12:30:38 -07005259 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5261 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005262 -1, i) < 0) {
5263 printk(KERN_WARNING PFX
5264 "%s: Using a smaller RX jumbo ring, "
5265 "only %d out of %d buffers were "
5266 "allocated successfully.\n",
5267 tp->dev->name, i, tp->rx_jumbo_pending);
5268 if (i == 0) {
5269 tg3_free_rings(tp);
5270 return -ENOMEM;
5271 }
5272 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275 }
5276 }
Michael Chan32d8c572006-07-25 16:38:29 -07005277 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278}
5279
5280/*
5281 * Must not be invoked with interrupt sources disabled and
5282 * the hardware shutdown down.
5283 */
5284static void tg3_free_consistent(struct tg3 *tp)
5285{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005286 kfree(tp->rx_std_buffers);
5287 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288 if (tp->rx_std) {
5289 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5290 tp->rx_std, tp->rx_std_mapping);
5291 tp->rx_std = NULL;
5292 }
5293 if (tp->rx_jumbo) {
5294 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5295 tp->rx_jumbo, tp->rx_jumbo_mapping);
5296 tp->rx_jumbo = NULL;
5297 }
5298 if (tp->rx_rcb) {
5299 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5300 tp->rx_rcb, tp->rx_rcb_mapping);
5301 tp->rx_rcb = NULL;
5302 }
5303 if (tp->tx_ring) {
5304 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5305 tp->tx_ring, tp->tx_desc_mapping);
5306 tp->tx_ring = NULL;
5307 }
5308 if (tp->hw_status) {
5309 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5310 tp->hw_status, tp->status_mapping);
5311 tp->hw_status = NULL;
5312 }
5313 if (tp->hw_stats) {
5314 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5315 tp->hw_stats, tp->stats_mapping);
5316 tp->hw_stats = NULL;
5317 }
5318}
5319
5320/*
5321 * Must not be invoked with interrupt sources disabled and
5322 * the hardware shutdown down. Can sleep.
5323 */
5324static int tg3_alloc_consistent(struct tg3 *tp)
5325{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005326 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 (TG3_RX_RING_SIZE +
5328 TG3_RX_JUMBO_RING_SIZE)) +
5329 (sizeof(struct tx_ring_info) *
5330 TG3_TX_RING_SIZE),
5331 GFP_KERNEL);
5332 if (!tp->rx_std_buffers)
5333 return -ENOMEM;
5334
Linus Torvalds1da177e2005-04-16 15:20:36 -07005335 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5336 tp->tx_buffers = (struct tx_ring_info *)
5337 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5338
5339 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5340 &tp->rx_std_mapping);
5341 if (!tp->rx_std)
5342 goto err_out;
5343
5344 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5345 &tp->rx_jumbo_mapping);
5346
5347 if (!tp->rx_jumbo)
5348 goto err_out;
5349
5350 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5351 &tp->rx_rcb_mapping);
5352 if (!tp->rx_rcb)
5353 goto err_out;
5354
5355 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5356 &tp->tx_desc_mapping);
5357 if (!tp->tx_ring)
5358 goto err_out;
5359
5360 tp->hw_status = pci_alloc_consistent(tp->pdev,
5361 TG3_HW_STATUS_SIZE,
5362 &tp->status_mapping);
5363 if (!tp->hw_status)
5364 goto err_out;
5365
5366 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5367 sizeof(struct tg3_hw_stats),
5368 &tp->stats_mapping);
5369 if (!tp->hw_stats)
5370 goto err_out;
5371
5372 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5373 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5374
5375 return 0;
5376
5377err_out:
5378 tg3_free_consistent(tp);
5379 return -ENOMEM;
5380}
5381
5382#define MAX_WAIT_CNT 1000
5383
5384/* To stop a block, clear the enable bit and poll till it
5385 * clears. tp->lock is held.
5386 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005387static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388{
5389 unsigned int i;
5390 u32 val;
5391
5392 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5393 switch (ofs) {
5394 case RCVLSC_MODE:
5395 case DMAC_MODE:
5396 case MBFREE_MODE:
5397 case BUFMGR_MODE:
5398 case MEMARB_MODE:
5399 /* We can't enable/disable these bits of the
5400 * 5705/5750, just say success.
5401 */
5402 return 0;
5403
5404 default:
5405 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005407 }
5408
5409 val = tr32(ofs);
5410 val &= ~enable_bit;
5411 tw32_f(ofs, val);
5412
5413 for (i = 0; i < MAX_WAIT_CNT; i++) {
5414 udelay(100);
5415 val = tr32(ofs);
5416 if ((val & enable_bit) == 0)
5417 break;
5418 }
5419
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005420 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005421 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5422 "ofs=%lx enable_bit=%x\n",
5423 ofs, enable_bit);
5424 return -ENODEV;
5425 }
5426
5427 return 0;
5428}
5429
5430/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005431static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432{
5433 int i, err;
5434
5435 tg3_disable_ints(tp);
5436
5437 tp->rx_mode &= ~RX_MODE_ENABLE;
5438 tw32_f(MAC_RX_MODE, tp->rx_mode);
5439 udelay(10);
5440
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005441 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5442 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5443 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5444 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5445 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5446 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005448 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5449 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5450 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5451 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5452 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5453 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5454 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455
5456 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5457 tw32_f(MAC_MODE, tp->mac_mode);
5458 udelay(40);
5459
5460 tp->tx_mode &= ~TX_MODE_ENABLE;
5461 tw32_f(MAC_TX_MODE, tp->tx_mode);
5462
5463 for (i = 0; i < MAX_WAIT_CNT; i++) {
5464 udelay(100);
5465 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5466 break;
5467 }
5468 if (i >= MAX_WAIT_CNT) {
5469 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5470 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5471 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005472 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005473 }
5474
Michael Chane6de8ad2005-05-05 14:42:41 -07005475 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005476 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5477 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005478
5479 tw32(FTQ_RESET, 0xffffffff);
5480 tw32(FTQ_RESET, 0x00000000);
5481
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005482 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5483 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005484
5485 if (tp->hw_status)
5486 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5487 if (tp->hw_stats)
5488 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5489
Linus Torvalds1da177e2005-04-16 15:20:36 -07005490 return err;
5491}
5492
5493/* tp->lock is held. */
5494static int tg3_nvram_lock(struct tg3 *tp)
5495{
5496 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5497 int i;
5498
Michael Chanec41c7d2006-01-17 02:40:55 -08005499 if (tp->nvram_lock_cnt == 0) {
5500 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5501 for (i = 0; i < 8000; i++) {
5502 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5503 break;
5504 udelay(20);
5505 }
5506 if (i == 8000) {
5507 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5508 return -ENODEV;
5509 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005510 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005511 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005512 }
5513 return 0;
5514}
5515
5516/* tp->lock is held. */
5517static void tg3_nvram_unlock(struct tg3 *tp)
5518{
Michael Chanec41c7d2006-01-17 02:40:55 -08005519 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5520 if (tp->nvram_lock_cnt > 0)
5521 tp->nvram_lock_cnt--;
5522 if (tp->nvram_lock_cnt == 0)
5523 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5524 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005525}
5526
5527/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005528static void tg3_enable_nvram_access(struct tg3 *tp)
5529{
5530 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5531 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5532 u32 nvaccess = tr32(NVRAM_ACCESS);
5533
5534 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5535 }
5536}
5537
5538/* tp->lock is held. */
5539static void tg3_disable_nvram_access(struct tg3 *tp)
5540{
5541 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5542 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5543 u32 nvaccess = tr32(NVRAM_ACCESS);
5544
5545 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5546 }
5547}
5548
Matt Carlson0d3031d2007-10-10 18:02:43 -07005549static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5550{
5551 int i;
5552 u32 apedata;
5553
5554 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5555 if (apedata != APE_SEG_SIG_MAGIC)
5556 return;
5557
5558 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005559 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005560 return;
5561
5562 /* Wait for up to 1 millisecond for APE to service previous event. */
5563 for (i = 0; i < 10; i++) {
5564 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5565 return;
5566
5567 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5568
5569 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5570 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5571 event | APE_EVENT_STATUS_EVENT_PENDING);
5572
5573 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5574
5575 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5576 break;
5577
5578 udelay(100);
5579 }
5580
5581 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5582 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5583}
5584
5585static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5586{
5587 u32 event;
5588 u32 apedata;
5589
5590 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5591 return;
5592
5593 switch (kind) {
5594 case RESET_KIND_INIT:
5595 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5596 APE_HOST_SEG_SIG_MAGIC);
5597 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5598 APE_HOST_SEG_LEN_MAGIC);
5599 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5600 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5601 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5602 APE_HOST_DRIVER_ID_MAGIC);
5603 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5604 APE_HOST_BEHAV_NO_PHYLOCK);
5605
5606 event = APE_EVENT_STATUS_STATE_START;
5607 break;
5608 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005609 /* With the interface we are currently using,
5610 * APE does not track driver state. Wiping
5611 * out the HOST SEGMENT SIGNATURE forces
5612 * the APE to assume OS absent status.
5613 */
5614 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5615
Matt Carlson0d3031d2007-10-10 18:02:43 -07005616 event = APE_EVENT_STATUS_STATE_UNLOAD;
5617 break;
5618 case RESET_KIND_SUSPEND:
5619 event = APE_EVENT_STATUS_STATE_SUSPEND;
5620 break;
5621 default:
5622 return;
5623 }
5624
5625 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5626
5627 tg3_ape_send_event(tp, event);
5628}
5629
Michael Chane6af3012005-04-21 17:12:05 -07005630/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5632{
David S. Millerf49639e2006-06-09 11:58:36 -07005633 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5634 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005635
5636 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5637 switch (kind) {
5638 case RESET_KIND_INIT:
5639 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5640 DRV_STATE_START);
5641 break;
5642
5643 case RESET_KIND_SHUTDOWN:
5644 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5645 DRV_STATE_UNLOAD);
5646 break;
5647
5648 case RESET_KIND_SUSPEND:
5649 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5650 DRV_STATE_SUSPEND);
5651 break;
5652
5653 default:
5654 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005655 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005657
5658 if (kind == RESET_KIND_INIT ||
5659 kind == RESET_KIND_SUSPEND)
5660 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661}
5662
5663/* tp->lock is held. */
5664static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5665{
5666 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5667 switch (kind) {
5668 case RESET_KIND_INIT:
5669 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5670 DRV_STATE_START_DONE);
5671 break;
5672
5673 case RESET_KIND_SHUTDOWN:
5674 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5675 DRV_STATE_UNLOAD_DONE);
5676 break;
5677
5678 default:
5679 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005680 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005681 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005682
5683 if (kind == RESET_KIND_SHUTDOWN)
5684 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685}
5686
5687/* tp->lock is held. */
5688static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5689{
5690 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5691 switch (kind) {
5692 case RESET_KIND_INIT:
5693 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5694 DRV_STATE_START);
5695 break;
5696
5697 case RESET_KIND_SHUTDOWN:
5698 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5699 DRV_STATE_UNLOAD);
5700 break;
5701
5702 case RESET_KIND_SUSPEND:
5703 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5704 DRV_STATE_SUSPEND);
5705 break;
5706
5707 default:
5708 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005709 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005710 }
5711}
5712
Michael Chan7a6f4362006-09-27 16:03:31 -07005713static int tg3_poll_fw(struct tg3 *tp)
5714{
5715 int i;
5716 u32 val;
5717
Michael Chanb5d37722006-09-27 16:06:21 -07005718 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005719 /* Wait up to 20ms for init done. */
5720 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005721 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5722 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005723 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005724 }
5725 return -ENODEV;
5726 }
5727
Michael Chan7a6f4362006-09-27 16:03:31 -07005728 /* Wait for firmware initialization to complete. */
5729 for (i = 0; i < 100000; i++) {
5730 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5731 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5732 break;
5733 udelay(10);
5734 }
5735
5736 /* Chip might not be fitted with firmware. Some Sun onboard
5737 * parts are configured like that. So don't signal the timeout
5738 * of the above loop as an error, but do report the lack of
5739 * running firmware once.
5740 */
5741 if (i >= 100000 &&
5742 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5743 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5744
5745 printk(KERN_INFO PFX "%s: No firmware running.\n",
5746 tp->dev->name);
5747 }
5748
5749 return 0;
5750}
5751
Michael Chanee6a99b2007-07-18 21:49:10 -07005752/* Save PCI command register before chip reset */
5753static void tg3_save_pci_state(struct tg3 *tp)
5754{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005755 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005756}
5757
5758/* Restore PCI state after chip reset */
5759static void tg3_restore_pci_state(struct tg3 *tp)
5760{
5761 u32 val;
5762
5763 /* Re-enable indirect register accesses. */
5764 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5765 tp->misc_host_ctrl);
5766
5767 /* Set MAX PCI retry to zero. */
5768 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5769 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5770 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5771 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005772 /* Allow reads and writes to the APE register and memory space. */
5773 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5774 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5775 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005776 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5777
Matt Carlson8a6eac92007-10-21 16:17:55 -07005778 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005779
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005780 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5781 pcie_set_readrq(tp->pdev, 4096);
5782 else {
Michael Chan114342f2007-10-15 02:12:26 -07005783 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5784 tp->pci_cacheline_sz);
5785 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5786 tp->pci_lat_timer);
5787 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005788
Michael Chanee6a99b2007-07-18 21:49:10 -07005789 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005790 if (tp->pcix_cap) {
5791 u16 pcix_cmd;
5792
5793 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5794 &pcix_cmd);
5795 pcix_cmd &= ~PCI_X_CMD_ERO;
5796 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5797 pcix_cmd);
5798 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005799
5800 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005801
5802 /* Chip reset on 5780 will reset MSI enable bit,
5803 * so need to restore it.
5804 */
5805 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5806 u16 ctrl;
5807
5808 pci_read_config_word(tp->pdev,
5809 tp->msi_cap + PCI_MSI_FLAGS,
5810 &ctrl);
5811 pci_write_config_word(tp->pdev,
5812 tp->msi_cap + PCI_MSI_FLAGS,
5813 ctrl | PCI_MSI_FLAGS_ENABLE);
5814 val = tr32(MSGINT_MODE);
5815 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5816 }
5817 }
5818}
5819
Linus Torvalds1da177e2005-04-16 15:20:36 -07005820static void tg3_stop_fw(struct tg3 *);
5821
5822/* tp->lock is held. */
5823static int tg3_chip_reset(struct tg3 *tp)
5824{
5825 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005826 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005827 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005828
David S. Millerf49639e2006-06-09 11:58:36 -07005829 tg3_nvram_lock(tp);
5830
Matt Carlson158d7ab2008-05-29 01:37:54 -07005831 tg3_mdio_stop(tp);
5832
Matt Carlson77b483f2008-08-15 14:07:24 -07005833 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5834
David S. Millerf49639e2006-06-09 11:58:36 -07005835 /* No matching tg3_nvram_unlock() after this because
5836 * chip reset below will undo the nvram lock.
5837 */
5838 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005839
Michael Chanee6a99b2007-07-18 21:49:10 -07005840 /* GRC_MISC_CFG core clock reset will clear the memory
5841 * enable bit in PCI register 4 and the MSI enable bit
5842 * on some chips, so we save relevant registers here.
5843 */
5844 tg3_save_pci_state(tp);
5845
Michael Chand9ab5ad2006-03-20 22:27:35 -08005846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005852 tw32(GRC_FASTBOOT_PC, 0);
5853
Linus Torvalds1da177e2005-04-16 15:20:36 -07005854 /*
5855 * We must avoid the readl() that normally takes place.
5856 * It locks machines, causes machine checks, and other
5857 * fun things. So, temporarily disable the 5701
5858 * hardware workaround, while we do the reset.
5859 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005860 write_op = tp->write32;
5861 if (write_op == tg3_write_flush_reg32)
5862 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005863
Michael Chand18edcb2007-03-24 20:57:11 -07005864 /* Prevent the irq handler from reading or writing PCI registers
5865 * during chip reset when the memory enable bit in the PCI command
5866 * register may be cleared. The chip does not generate interrupt
5867 * at this time, but the irq handler may still be called due to irq
5868 * sharing or irqpoll.
5869 */
5870 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005871 if (tp->hw_status) {
5872 tp->hw_status->status = 0;
5873 tp->hw_status->status_tag = 0;
5874 }
Michael Chand18edcb2007-03-24 20:57:11 -07005875 tp->last_tag = 0;
5876 smp_mb();
5877 synchronize_irq(tp->pdev->irq);
5878
Linus Torvalds1da177e2005-04-16 15:20:36 -07005879 /* do the reset */
5880 val = GRC_MISC_CFG_CORECLK_RESET;
5881
5882 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5883 if (tr32(0x7e2c) == 0x60) {
5884 tw32(0x7e2c, 0x20);
5885 }
5886 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5887 tw32(GRC_MISC_CFG, (1 << 29));
5888 val |= (1 << 29);
5889 }
5890 }
5891
Michael Chanb5d37722006-09-27 16:06:21 -07005892 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5893 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5894 tw32(GRC_VCPU_EXT_CTRL,
5895 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5896 }
5897
Linus Torvalds1da177e2005-04-16 15:20:36 -07005898 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5899 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5900 tw32(GRC_MISC_CFG, val);
5901
Michael Chan1ee582d2005-08-09 20:16:46 -07005902 /* restore 5701 hardware bug workaround write method */
5903 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005904
5905 /* Unfortunately, we have to delay before the PCI read back.
5906 * Some 575X chips even will not respond to a PCI cfg access
5907 * when the reset command is given to the chip.
5908 *
5909 * How do these hardware designers expect things to work
5910 * properly if the PCI write is posted for a long period
5911 * of time? It is always necessary to have some method by
5912 * which a register read back can occur to push the write
5913 * out which does the reset.
5914 *
5915 * For most tg3 variants the trick below was working.
5916 * Ho hum...
5917 */
5918 udelay(120);
5919
5920 /* Flush PCI posted writes. The normal MMIO registers
5921 * are inaccessible at this time so this is the only
5922 * way to make this reliably (actually, this is no longer
5923 * the case, see above). I tried to use indirect
5924 * register read/write but this upset some 5701 variants.
5925 */
5926 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5927
5928 udelay(120);
5929
5930 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5931 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5932 int i;
5933 u32 cfg_val;
5934
5935 /* Wait for link training to complete. */
5936 for (i = 0; i < 5000; i++)
5937 udelay(100);
5938
5939 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5940 pci_write_config_dword(tp->pdev, 0xc4,
5941 cfg_val | (1 << 15));
5942 }
5943 /* Set PCIE max payload size and clear error status. */
5944 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5945 }
5946
Michael Chanee6a99b2007-07-18 21:49:10 -07005947 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005948
Michael Chand18edcb2007-03-24 20:57:11 -07005949 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5950
Michael Chanee6a99b2007-07-18 21:49:10 -07005951 val = 0;
5952 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07005953 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07005954 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005955
5956 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5957 tg3_stop_fw(tp);
5958 tw32(0x5000, 0x400);
5959 }
5960
5961 tw32(GRC_MODE, tp->grc_mode);
5962
5963 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005964 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005965
5966 tw32(0xc4, val | (1 << 15));
5967 }
5968
5969 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5971 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5972 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5973 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5974 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5975 }
5976
5977 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5978 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5979 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07005980 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5981 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5982 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07005983 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5984 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5985 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5986 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5987 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988 } else
5989 tw32_f(MAC_MODE, 0);
5990 udelay(40);
5991
Matt Carlson158d7ab2008-05-29 01:37:54 -07005992 tg3_mdio_start(tp);
5993
Matt Carlson77b483f2008-08-15 14:07:24 -07005994 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5995
Michael Chan7a6f4362006-09-27 16:03:31 -07005996 err = tg3_poll_fw(tp);
5997 if (err)
5998 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005999
6000 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6001 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006002 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003
6004 tw32(0x7c00, val | (1 << 25));
6005 }
6006
6007 /* Reprobe ASF enable state. */
6008 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6009 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6010 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6011 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6012 u32 nic_cfg;
6013
6014 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6015 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6016 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006017 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006018 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006019 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6020 }
6021 }
6022
6023 return 0;
6024}
6025
6026/* tp->lock is held. */
6027static void tg3_stop_fw(struct tg3 *tp)
6028{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006029 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6030 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006031 /* Wait for RX cpu to ACK the previous event. */
6032 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006033
6034 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006035
6036 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006037
Matt Carlson7c5026a2008-05-02 16:49:29 -07006038 /* Wait for RX cpu to ACK this event. */
6039 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006040 }
6041}
6042
6043/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006044static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006045{
6046 int err;
6047
6048 tg3_stop_fw(tp);
6049
Michael Chan944d9802005-05-29 14:57:48 -07006050 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006051
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006052 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006053 err = tg3_chip_reset(tp);
6054
Michael Chan944d9802005-05-29 14:57:48 -07006055 tg3_write_sig_legacy(tp, kind);
6056 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006057
6058 if (err)
6059 return err;
6060
6061 return 0;
6062}
6063
6064#define TG3_FW_RELEASE_MAJOR 0x0
6065#define TG3_FW_RELASE_MINOR 0x0
6066#define TG3_FW_RELEASE_FIX 0x0
6067#define TG3_FW_START_ADDR 0x08000000
6068#define TG3_FW_TEXT_ADDR 0x08000000
6069#define TG3_FW_TEXT_LEN 0x9c0
6070#define TG3_FW_RODATA_ADDR 0x080009c0
6071#define TG3_FW_RODATA_LEN 0x60
6072#define TG3_FW_DATA_ADDR 0x08000a40
6073#define TG3_FW_DATA_LEN 0x20
6074#define TG3_FW_SBSS_ADDR 0x08000a60
6075#define TG3_FW_SBSS_LEN 0xc
6076#define TG3_FW_BSS_ADDR 0x08000a70
6077#define TG3_FW_BSS_LEN 0x10
6078
Andreas Mohr50da8592006-08-14 23:54:30 -07006079static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6081 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6082 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6083 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6084 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6085 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6086 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6087 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6088 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6089 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6090 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6091 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6092 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6093 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6094 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6095 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6096 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6097 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6098 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6099 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6100 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6101 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6102 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6105 0, 0, 0, 0, 0, 0,
6106 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6107 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6108 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6109 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6110 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6111 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6112 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6113 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6114 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6115 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6116 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6117 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6119 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6120 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6121 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6122 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6123 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6124 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6125 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6126 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6127 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6128 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6129 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6130 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6131 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6132 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6133 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6134 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6135 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6136 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6137 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6138 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6139 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6140 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6141 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6142 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6143 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6144 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6145 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6146 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6147 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6148 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6149 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6150 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6151 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6152 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6153 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6154 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6155 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6156 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6157 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6158 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6159 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6160 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6161 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6162 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6163 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6164 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6165 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6166 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6167 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6168 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6169 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6170 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6171};
6172
Andreas Mohr50da8592006-08-14 23:54:30 -07006173static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006174 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6175 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6176 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6177 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6178 0x00000000
6179};
6180
6181#if 0 /* All zeros, don't eat up space with it. */
6182u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6183 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6184 0x00000000, 0x00000000, 0x00000000, 0x00000000
6185};
6186#endif
6187
6188#define RX_CPU_SCRATCH_BASE 0x30000
6189#define RX_CPU_SCRATCH_SIZE 0x04000
6190#define TX_CPU_SCRATCH_BASE 0x34000
6191#define TX_CPU_SCRATCH_SIZE 0x04000
6192
6193/* tp->lock is held. */
6194static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6195{
6196 int i;
6197
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006198 BUG_ON(offset == TX_CPU_BASE &&
6199 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006200
Michael Chanb5d37722006-09-27 16:06:21 -07006201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6202 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6203
6204 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6205 return 0;
6206 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006207 if (offset == RX_CPU_BASE) {
6208 for (i = 0; i < 10000; i++) {
6209 tw32(offset + CPU_STATE, 0xffffffff);
6210 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6211 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6212 break;
6213 }
6214
6215 tw32(offset + CPU_STATE, 0xffffffff);
6216 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6217 udelay(10);
6218 } else {
6219 for (i = 0; i < 10000; i++) {
6220 tw32(offset + CPU_STATE, 0xffffffff);
6221 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6222 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6223 break;
6224 }
6225 }
6226
6227 if (i >= 10000) {
6228 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6229 "and %s CPU\n",
6230 tp->dev->name,
6231 (offset == RX_CPU_BASE ? "RX" : "TX"));
6232 return -ENODEV;
6233 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006234
6235 /* Clear firmware's nvram arbitration. */
6236 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6237 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006238 return 0;
6239}
6240
6241struct fw_info {
6242 unsigned int text_base;
6243 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006244 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006245 unsigned int rodata_base;
6246 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006247 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006248 unsigned int data_base;
6249 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006250 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006251};
6252
6253/* tp->lock is held. */
6254static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6255 int cpu_scratch_size, struct fw_info *info)
6256{
Michael Chanec41c7d2006-01-17 02:40:55 -08006257 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006258 void (*write_op)(struct tg3 *, u32, u32);
6259
6260 if (cpu_base == TX_CPU_BASE &&
6261 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6262 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6263 "TX cpu firmware on %s which is 5705.\n",
6264 tp->dev->name);
6265 return -EINVAL;
6266 }
6267
6268 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6269 write_op = tg3_write_mem;
6270 else
6271 write_op = tg3_write_indirect_reg32;
6272
Michael Chan1b628152005-05-29 14:59:49 -07006273 /* It is possible that bootcode is still loading at this point.
6274 * Get the nvram lock first before halting the cpu.
6275 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006276 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006277 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006278 if (!lock_err)
6279 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006280 if (err)
6281 goto out;
6282
6283 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6284 write_op(tp, cpu_scratch_base + i, 0);
6285 tw32(cpu_base + CPU_STATE, 0xffffffff);
6286 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6287 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6288 write_op(tp, (cpu_scratch_base +
6289 (info->text_base & 0xffff) +
6290 (i * sizeof(u32))),
6291 (info->text_data ?
6292 info->text_data[i] : 0));
6293 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6294 write_op(tp, (cpu_scratch_base +
6295 (info->rodata_base & 0xffff) +
6296 (i * sizeof(u32))),
6297 (info->rodata_data ?
6298 info->rodata_data[i] : 0));
6299 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6300 write_op(tp, (cpu_scratch_base +
6301 (info->data_base & 0xffff) +
6302 (i * sizeof(u32))),
6303 (info->data_data ?
6304 info->data_data[i] : 0));
6305
6306 err = 0;
6307
6308out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006309 return err;
6310}
6311
6312/* tp->lock is held. */
6313static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6314{
6315 struct fw_info info;
6316 int err, i;
6317
6318 info.text_base = TG3_FW_TEXT_ADDR;
6319 info.text_len = TG3_FW_TEXT_LEN;
6320 info.text_data = &tg3FwText[0];
6321 info.rodata_base = TG3_FW_RODATA_ADDR;
6322 info.rodata_len = TG3_FW_RODATA_LEN;
6323 info.rodata_data = &tg3FwRodata[0];
6324 info.data_base = TG3_FW_DATA_ADDR;
6325 info.data_len = TG3_FW_DATA_LEN;
6326 info.data_data = NULL;
6327
6328 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6329 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6330 &info);
6331 if (err)
6332 return err;
6333
6334 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6335 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6336 &info);
6337 if (err)
6338 return err;
6339
6340 /* Now startup only the RX cpu. */
6341 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6342 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6343
6344 for (i = 0; i < 5; i++) {
6345 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6346 break;
6347 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6348 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6349 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6350 udelay(1000);
6351 }
6352 if (i >= 5) {
6353 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6354 "to set RX CPU PC, is %08x should be %08x\n",
6355 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6356 TG3_FW_TEXT_ADDR);
6357 return -ENODEV;
6358 }
6359 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6360 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6361
6362 return 0;
6363}
6364
Linus Torvalds1da177e2005-04-16 15:20:36 -07006365
6366#define TG3_TSO_FW_RELEASE_MAJOR 0x1
6367#define TG3_TSO_FW_RELASE_MINOR 0x6
6368#define TG3_TSO_FW_RELEASE_FIX 0x0
6369#define TG3_TSO_FW_START_ADDR 0x08000000
6370#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6371#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6372#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6373#define TG3_TSO_FW_RODATA_LEN 0x60
6374#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6375#define TG3_TSO_FW_DATA_LEN 0x30
6376#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6377#define TG3_TSO_FW_SBSS_LEN 0x2c
6378#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6379#define TG3_TSO_FW_BSS_LEN 0x894
6380
Andreas Mohr50da8592006-08-14 23:54:30 -07006381static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6383 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6384 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6385 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6386 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6387 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6388 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6389 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6390 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6391 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6392 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6393 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6394 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6395 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6396 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6397 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6398 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6399 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6400 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6401 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6402 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6403 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6404 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6405 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6406 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6407 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6408 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6409 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6410 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6411 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6412 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6413 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6414 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6415 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6416 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6417 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6418 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6419 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6420 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6421 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6422 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6423 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6424 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6425 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6426 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6427 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6428 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6429 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6430 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6431 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6432 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6433 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6434 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6435 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6436 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6437 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6438 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6439 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6440 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6441 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6442 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6443 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6444 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6445 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6446 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6447 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6448 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6449 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6450 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6451 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6452 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6453 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6454 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6455 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6456 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6457 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6458 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6459 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6460 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6461 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6462 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6463 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6464 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6465 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6466 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6467 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6468 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6469 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6470 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6471 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6472 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6473 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6474 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6475 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6476 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6477 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6478 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6479 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6480 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6481 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6482 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6483 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6484 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6485 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6486 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6487 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6488 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6489 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6490 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6491 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6492 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6493 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6494 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6495 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6496 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6497 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6498 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6499 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6500 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6501 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6502 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6503 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6504 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6505 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6506 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6507 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6508 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6509 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6510 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6511 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6512 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6513 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6514 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6515 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6516 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6517 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6518 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6519 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6520 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6521 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6522 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6523 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6524 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6525 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6526 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6527 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6528 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6529 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6530 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6531 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6532 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6533 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6534 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6535 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6536 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6537 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6538 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6539 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6540 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6541 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6542 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6543 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6544 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6545 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6546 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6547 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6548 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6549 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6550 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6551 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6552 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6553 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6554 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6555 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6556 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6557 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6558 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6559 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6560 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6561 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6562 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6563 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6564 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6565 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6566 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6567 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6568 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6569 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6570 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6571 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6572 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6573 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6574 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6575 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6576 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6577 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6578 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6579 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6580 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6581 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6582 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6583 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6584 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6585 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6586 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6587 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6588 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6589 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6590 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6591 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6592 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6593 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6594 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6595 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6596 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6597 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6598 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6599 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6600 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6601 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6602 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6603 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6604 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6605 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6606 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6607 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6608 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6609 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6610 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6611 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6612 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6613 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6614 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6615 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6616 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6617 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6618 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6619 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6620 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6621 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6622 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6623 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6624 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6625 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6626 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6627 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6628 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6629 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6630 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6631 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6632 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6633 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6634 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6635 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6636 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6637 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6638 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6639 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6640 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6641 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6642 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6643 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6644 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6645 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6646 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6647 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6648 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6649 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6650 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6651 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6652 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6653 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6654 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6655 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6656 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6657 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6658 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6659 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6660 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6661 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6662 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6663 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6664 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6665 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6666};
6667
Andreas Mohr50da8592006-08-14 23:54:30 -07006668static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006669 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6670 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6671 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6672 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6673 0x00000000,
6674};
6675
Andreas Mohr50da8592006-08-14 23:54:30 -07006676static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006677 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6678 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6679 0x00000000,
6680};
6681
6682/* 5705 needs a special version of the TSO firmware. */
6683#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6684#define TG3_TSO5_FW_RELASE_MINOR 0x2
6685#define TG3_TSO5_FW_RELEASE_FIX 0x0
6686#define TG3_TSO5_FW_START_ADDR 0x00010000
6687#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6688#define TG3_TSO5_FW_TEXT_LEN 0xe90
6689#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6690#define TG3_TSO5_FW_RODATA_LEN 0x50
6691#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6692#define TG3_TSO5_FW_DATA_LEN 0x20
6693#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6694#define TG3_TSO5_FW_SBSS_LEN 0x28
6695#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6696#define TG3_TSO5_FW_BSS_LEN 0x88
6697
Andreas Mohr50da8592006-08-14 23:54:30 -07006698static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6700 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6701 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6702 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6703 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6704 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6705 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6706 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6707 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6708 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6709 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6710 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6711 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6712 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6713 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6714 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6715 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6716 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6717 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6718 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6719 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6720 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6721 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6722 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6723 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6724 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6725 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6726 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6727 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6728 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6729 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6730 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6731 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6732 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6733 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6734 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6735 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6736 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6737 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6738 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6739 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6740 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6741 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6742 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6743 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6744 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6745 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6746 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6747 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6748 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6749 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6750 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6751 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6752 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6753 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6754 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6755 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6756 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6757 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6758 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6759 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6760 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6761 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6762 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6763 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6764 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6765 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6766 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6767 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6768 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6769 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6770 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6771 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6772 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6773 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6774 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6775 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6776 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6777 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6778 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6779 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6780 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6781 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6782 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6783 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6784 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6785 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6786 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6787 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6788 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6789 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6790 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6791 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6792 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6793 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6794 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6795 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6796 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6797 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6798 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6799 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6800 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6801 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6802 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6803 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6804 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6805 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6806 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6807 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6808 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6809 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6810 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6811 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6812 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6813 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6814 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6815 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6816 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6817 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6818 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6819 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6820 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6821 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6822 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6823 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6824 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6825 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6826 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6827 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6828 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6829 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6830 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6831 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6832 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6833 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6834 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6835 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6836 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6837 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6838 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6839 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6840 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6841 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6842 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6843 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6844 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6845 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6846 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6847 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6848 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6849 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6850 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6851 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6852 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6853 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6854 0x00000000, 0x00000000, 0x00000000,
6855};
6856
Andreas Mohr50da8592006-08-14 23:54:30 -07006857static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006858 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6859 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6860 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6861 0x00000000, 0x00000000, 0x00000000,
6862};
6863
Andreas Mohr50da8592006-08-14 23:54:30 -07006864static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006865 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6866 0x00000000, 0x00000000, 0x00000000,
6867};
6868
6869/* tp->lock is held. */
6870static int tg3_load_tso_firmware(struct tg3 *tp)
6871{
6872 struct fw_info info;
6873 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6874 int err, i;
6875
6876 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6877 return 0;
6878
6879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6880 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6881 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6882 info.text_data = &tg3Tso5FwText[0];
6883 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6884 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6885 info.rodata_data = &tg3Tso5FwRodata[0];
6886 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6887 info.data_len = TG3_TSO5_FW_DATA_LEN;
6888 info.data_data = &tg3Tso5FwData[0];
6889 cpu_base = RX_CPU_BASE;
6890 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6891 cpu_scratch_size = (info.text_len +
6892 info.rodata_len +
6893 info.data_len +
6894 TG3_TSO5_FW_SBSS_LEN +
6895 TG3_TSO5_FW_BSS_LEN);
6896 } else {
6897 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6898 info.text_len = TG3_TSO_FW_TEXT_LEN;
6899 info.text_data = &tg3TsoFwText[0];
6900 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6901 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6902 info.rodata_data = &tg3TsoFwRodata[0];
6903 info.data_base = TG3_TSO_FW_DATA_ADDR;
6904 info.data_len = TG3_TSO_FW_DATA_LEN;
6905 info.data_data = &tg3TsoFwData[0];
6906 cpu_base = TX_CPU_BASE;
6907 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6908 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6909 }
6910
6911 err = tg3_load_firmware_cpu(tp, cpu_base,
6912 cpu_scratch_base, cpu_scratch_size,
6913 &info);
6914 if (err)
6915 return err;
6916
6917 /* Now startup the cpu. */
6918 tw32(cpu_base + CPU_STATE, 0xffffffff);
6919 tw32_f(cpu_base + CPU_PC, info.text_base);
6920
6921 for (i = 0; i < 5; i++) {
6922 if (tr32(cpu_base + CPU_PC) == info.text_base)
6923 break;
6924 tw32(cpu_base + CPU_STATE, 0xffffffff);
6925 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6926 tw32_f(cpu_base + CPU_PC, info.text_base);
6927 udelay(1000);
6928 }
6929 if (i >= 5) {
6930 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6931 "to set CPU PC, is %08x should be %08x\n",
6932 tp->dev->name, tr32(cpu_base + CPU_PC),
6933 info.text_base);
6934 return -ENODEV;
6935 }
6936 tw32(cpu_base + CPU_STATE, 0xffffffff);
6937 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6938 return 0;
6939}
6940
Linus Torvalds1da177e2005-04-16 15:20:36 -07006941
Linus Torvalds1da177e2005-04-16 15:20:36 -07006942static int tg3_set_mac_addr(struct net_device *dev, void *p)
6943{
6944 struct tg3 *tp = netdev_priv(dev);
6945 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07006946 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006947
Michael Chanf9804dd2005-09-27 12:13:10 -07006948 if (!is_valid_ether_addr(addr->sa_data))
6949 return -EINVAL;
6950
Linus Torvalds1da177e2005-04-16 15:20:36 -07006951 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6952
Michael Chane75f7c92006-03-20 21:33:26 -08006953 if (!netif_running(dev))
6954 return 0;
6955
Michael Chan58712ef2006-04-29 18:58:01 -07006956 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006957 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07006958
Michael Chan986e0ae2007-05-05 12:10:20 -07006959 addr0_high = tr32(MAC_ADDR_0_HIGH);
6960 addr0_low = tr32(MAC_ADDR_0_LOW);
6961 addr1_high = tr32(MAC_ADDR_1_HIGH);
6962 addr1_low = tr32(MAC_ADDR_1_LOW);
6963
6964 /* Skip MAC addr 1 if ASF is using it. */
6965 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6966 !(addr1_high == 0 && addr1_low == 0))
6967 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07006968 }
Michael Chan986e0ae2007-05-05 12:10:20 -07006969 spin_lock_bh(&tp->lock);
6970 __tg3_set_mac_addr(tp, skip_mac_1);
6971 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006972
Michael Chanb9ec6c12006-07-25 16:37:27 -07006973 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006974}
6975
6976/* tp->lock is held. */
6977static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6978 dma_addr_t mapping, u32 maxlen_flags,
6979 u32 nic_addr)
6980{
6981 tg3_write_mem(tp,
6982 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6983 ((u64) mapping >> 32));
6984 tg3_write_mem(tp,
6985 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6986 ((u64) mapping & 0xffffffff));
6987 tg3_write_mem(tp,
6988 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6989 maxlen_flags);
6990
6991 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6992 tg3_write_mem(tp,
6993 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6994 nic_addr);
6995}
6996
6997static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07006998static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07006999{
7000 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7001 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7002 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7003 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7004 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7005 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7006 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7007 }
7008 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7009 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7010 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7011 u32 val = ec->stats_block_coalesce_usecs;
7012
7013 if (!netif_carrier_ok(tp->dev))
7014 val = 0;
7015
7016 tw32(HOSTCC_STAT_COAL_TICKS, val);
7017 }
7018}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007019
7020/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007021static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007022{
7023 u32 val, rdmac_mode;
7024 int i, err, limit;
7025
7026 tg3_disable_ints(tp);
7027
7028 tg3_stop_fw(tp);
7029
7030 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7031
7032 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007033 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007034 }
7035
Matt Carlsondd477002008-05-25 23:45:58 -07007036 if (reset_phy &&
7037 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007038 tg3_phy_reset(tp);
7039
Linus Torvalds1da177e2005-04-16 15:20:36 -07007040 err = tg3_chip_reset(tp);
7041 if (err)
7042 return err;
7043
7044 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7045
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007046 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007047 val = tr32(TG3_CPMU_CTRL);
7048 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7049 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007050
7051 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7052 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7053 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7054 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7055
7056 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7057 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7058 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7059 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7060
7061 val = tr32(TG3_CPMU_HST_ACC);
7062 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7063 val |= CPMU_HST_ACC_MACCLK_6_25;
7064 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007065 }
7066
Linus Torvalds1da177e2005-04-16 15:20:36 -07007067 /* This works around an issue with Athlon chipsets on
7068 * B3 tigon3 silicon. This bit has no effect on any
7069 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007070 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007071 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007072 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7073 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7074 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7075 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7076 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007077
7078 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7079 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7080 val = tr32(TG3PCI_PCISTATE);
7081 val |= PCISTATE_RETRY_SAME_DMA;
7082 tw32(TG3PCI_PCISTATE, val);
7083 }
7084
Matt Carlson0d3031d2007-10-10 18:02:43 -07007085 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7086 /* Allow reads and writes to the
7087 * APE register and memory space.
7088 */
7089 val = tr32(TG3PCI_PCISTATE);
7090 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7091 PCISTATE_ALLOW_APE_SHMEM_WR;
7092 tw32(TG3PCI_PCISTATE, val);
7093 }
7094
Linus Torvalds1da177e2005-04-16 15:20:36 -07007095 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7096 /* Enable some hw fixes. */
7097 val = tr32(TG3PCI_MSI_DATA);
7098 val |= (1 << 26) | (1 << 28) | (1 << 29);
7099 tw32(TG3PCI_MSI_DATA, val);
7100 }
7101
7102 /* Descriptor ring init may make accesses to the
7103 * NIC SRAM area to setup the TX descriptors, so we
7104 * can only do this after the hardware has been
7105 * successfully reset.
7106 */
Michael Chan32d8c572006-07-25 16:38:29 -07007107 err = tg3_init_rings(tp);
7108 if (err)
7109 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007110
Matt Carlson9936bcf2007-10-10 18:03:07 -07007111 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlson57e69832008-05-25 23:48:31 -07007112 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7113 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007114 /* This value is determined during the probe time DMA
7115 * engine test, tg3_test_dma.
7116 */
7117 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007119
7120 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7121 GRC_MODE_4X_NIC_SEND_RINGS |
7122 GRC_MODE_NO_TX_PHDR_CSUM |
7123 GRC_MODE_NO_RX_PHDR_CSUM);
7124 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007125
7126 /* Pseudo-header checksum is done by hardware logic and not
7127 * the offload processers, so make the chip do the pseudo-
7128 * header checksums on receive. For transmit it is more
7129 * convenient to do the pseudo-header checksum in software
7130 * as Linux does that on transmit for us in all cases.
7131 */
7132 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007133
7134 tw32(GRC_MODE,
7135 tp->grc_mode |
7136 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7137
7138 /* Setup the timer prescalar register. Clock is always 66Mhz. */
7139 val = tr32(GRC_MISC_CFG);
7140 val &= ~0xff;
7141 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7142 tw32(GRC_MISC_CFG, val);
7143
7144 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007145 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007146 /* Do nothing. */
7147 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7148 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7150 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7151 else
7152 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7153 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7154 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7155 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7157 int fw_len;
7158
7159 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7160 TG3_TSO5_FW_RODATA_LEN +
7161 TG3_TSO5_FW_DATA_LEN +
7162 TG3_TSO5_FW_SBSS_LEN +
7163 TG3_TSO5_FW_BSS_LEN);
7164 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7165 tw32(BUFMGR_MB_POOL_ADDR,
7166 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7167 tw32(BUFMGR_MB_POOL_SIZE,
7168 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7169 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170
Michael Chan0f893dc2005-07-25 12:30:38 -07007171 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007172 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7173 tp->bufmgr_config.mbuf_read_dma_low_water);
7174 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7175 tp->bufmgr_config.mbuf_mac_rx_low_water);
7176 tw32(BUFMGR_MB_HIGH_WATER,
7177 tp->bufmgr_config.mbuf_high_water);
7178 } else {
7179 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7180 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7181 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7182 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7183 tw32(BUFMGR_MB_HIGH_WATER,
7184 tp->bufmgr_config.mbuf_high_water_jumbo);
7185 }
7186 tw32(BUFMGR_DMA_LOW_WATER,
7187 tp->bufmgr_config.dma_low_water);
7188 tw32(BUFMGR_DMA_HIGH_WATER,
7189 tp->bufmgr_config.dma_high_water);
7190
7191 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7192 for (i = 0; i < 2000; i++) {
7193 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7194 break;
7195 udelay(10);
7196 }
7197 if (i >= 2000) {
7198 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7199 tp->dev->name);
7200 return -ENODEV;
7201 }
7202
7203 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007204 val = tp->rx_pending / 8;
7205 if (val == 0)
7206 val = 1;
7207 else if (val > tp->rx_std_max_post)
7208 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007209 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7210 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7211 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7212
7213 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7214 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7215 }
Michael Chanf92905d2006-06-29 20:14:29 -07007216
7217 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007218
7219 /* Initialize TG3_BDINFO's at:
7220 * RCVDBDI_STD_BD: standard eth size rx ring
7221 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7222 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7223 *
7224 * like so:
7225 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7226 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7227 * ring attribute flags
7228 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7229 *
7230 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7231 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7232 *
7233 * The size of each ring is fixed in the firmware, but the location is
7234 * configurable.
7235 */
7236 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7237 ((u64) tp->rx_std_mapping >> 32));
7238 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7239 ((u64) tp->rx_std_mapping & 0xffffffff));
7240 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7241 NIC_SRAM_RX_BUFFER_DESC);
7242
7243 /* Don't even try to program the JUMBO/MINI buffer descriptor
7244 * configs on 5705.
7245 */
7246 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7247 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7248 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7249 } else {
7250 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7251 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7252
7253 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7254 BDINFO_FLAGS_DISABLED);
7255
7256 /* Setup replenish threshold. */
7257 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7258
Michael Chan0f893dc2005-07-25 12:30:38 -07007259 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007260 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7261 ((u64) tp->rx_jumbo_mapping >> 32));
7262 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7263 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7264 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7265 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7266 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7267 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7268 } else {
7269 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7270 BDINFO_FLAGS_DISABLED);
7271 }
7272
7273 }
7274
7275 /* There is only one send ring on 5705/5750, no need to explicitly
7276 * disable the others.
7277 */
7278 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7279 /* Clear out send RCB ring in SRAM. */
7280 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7281 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7282 BDINFO_FLAGS_DISABLED);
7283 }
7284
7285 tp->tx_prod = 0;
7286 tp->tx_cons = 0;
7287 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7288 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7289
7290 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7291 tp->tx_desc_mapping,
7292 (TG3_TX_RING_SIZE <<
7293 BDINFO_FLAGS_MAXLEN_SHIFT),
7294 NIC_SRAM_TX_BUFFER_DESC);
7295
7296 /* There is only one receive return ring on 5705/5750, no need
7297 * to explicitly disable the others.
7298 */
7299 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7300 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7301 i += TG3_BDINFO_SIZE) {
7302 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7303 BDINFO_FLAGS_DISABLED);
7304 }
7305 }
7306
7307 tp->rx_rcb_ptr = 0;
7308 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7309
7310 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7311 tp->rx_rcb_mapping,
7312 (TG3_RX_RCB_RING_SIZE(tp) <<
7313 BDINFO_FLAGS_MAXLEN_SHIFT),
7314 0);
7315
7316 tp->rx_std_ptr = tp->rx_pending;
7317 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7318 tp->rx_std_ptr);
7319
Michael Chan0f893dc2005-07-25 12:30:38 -07007320 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007321 tp->rx_jumbo_pending : 0;
7322 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7323 tp->rx_jumbo_ptr);
7324
7325 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007326 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007327
7328 /* MTU + ethernet header + FCS + optional VLAN tag */
7329 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7330
7331 /* The slot time is changed by tg3_setup_phy if we
7332 * run at gigabit with half duplex.
7333 */
7334 tw32(MAC_TX_LENGTHS,
7335 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7336 (6 << TX_LENGTHS_IPG_SHIFT) |
7337 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7338
7339 /* Receive rules. */
7340 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7341 tw32(RCVLPC_CONFIG, 0x0181);
7342
7343 /* Calculate RDMAC_MODE setting early, we need it to determine
7344 * the RCVLPC_STATE_ENABLE mask.
7345 */
7346 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7347 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7348 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7349 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7350 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007351
Matt Carlson57e69832008-05-25 23:48:31 -07007352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7353 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007354 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7355 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7356 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7357
Michael Chan85e94ce2005-04-21 17:05:28 -07007358 /* If statement applies to 5705 and 5750 PCI devices only */
7359 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7360 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7361 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007362 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007364 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7365 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7366 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7367 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7368 }
7369 }
7370
Michael Chan85e94ce2005-04-21 17:05:28 -07007371 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7372 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7373
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7375 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376
7377 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007378 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7379 val = tr32(RCVLPC_STATS_ENABLE);
7380 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7381 tw32(RCVLPC_STATS_ENABLE, val);
7382 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7383 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007384 val = tr32(RCVLPC_STATS_ENABLE);
7385 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7386 tw32(RCVLPC_STATS_ENABLE, val);
7387 } else {
7388 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7389 }
7390 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7391 tw32(SNDDATAI_STATSENAB, 0xffffff);
7392 tw32(SNDDATAI_STATSCTRL,
7393 (SNDDATAI_SCTRL_ENABLE |
7394 SNDDATAI_SCTRL_FASTUPD));
7395
7396 /* Setup host coalescing engine. */
7397 tw32(HOSTCC_MODE, 0);
7398 for (i = 0; i < 2000; i++) {
7399 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7400 break;
7401 udelay(10);
7402 }
7403
Michael Chand244c892005-07-05 14:42:33 -07007404 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007405
7406 /* set status block DMA address */
7407 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7408 ((u64) tp->status_mapping >> 32));
7409 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7410 ((u64) tp->status_mapping & 0xffffffff));
7411
7412 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7413 /* Status/statistics block address. See tg3_timer,
7414 * the tg3_periodic_fetch_stats call there, and
7415 * tg3_get_stats to see how this works for 5705/5750 chips.
7416 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007417 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7418 ((u64) tp->stats_mapping >> 32));
7419 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7420 ((u64) tp->stats_mapping & 0xffffffff));
7421 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7422 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7423 }
7424
7425 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7426
7427 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7428 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7429 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7430 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7431
7432 /* Clear statistics/status block in chip, and status block in ram. */
7433 for (i = NIC_SRAM_STATS_BLK;
7434 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7435 i += sizeof(u32)) {
7436 tg3_write_mem(tp, i, 0);
7437 udelay(40);
7438 }
7439 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7440
Michael Chanc94e3942005-09-27 12:12:42 -07007441 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7442 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7443 /* reset to prevent losing 1st rx packet intermittently */
7444 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7445 udelay(10);
7446 }
7447
Matt Carlson3bda1252008-08-15 14:08:22 -07007448 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7449 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7450 else
7451 tp->mac_mode = 0;
7452 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007453 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007454 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7455 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7456 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7457 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007458 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7459 udelay(40);
7460
Michael Chan314fba32005-04-21 17:07:04 -07007461 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007462 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007463 * register to preserve the GPIO settings for LOMs. The GPIOs,
7464 * whether used as inputs or outputs, are set by boot code after
7465 * reset.
7466 */
Michael Chan9d26e212006-12-07 00:21:14 -08007467 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007468 u32 gpio_mask;
7469
Michael Chan9d26e212006-12-07 00:21:14 -08007470 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7471 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7472 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007473
7474 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7475 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7476 GRC_LCLCTRL_GPIO_OUTPUT3;
7477
Michael Chanaf36e6b2006-03-23 01:28:06 -08007478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7479 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7480
Gary Zambranoaaf84462007-05-05 11:51:45 -07007481 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007482 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7483
7484 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007485 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7486 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7487 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007489 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7490 udelay(100);
7491
Michael Chan09ee9292005-08-09 20:17:00 -07007492 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007493 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007494
7495 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7496 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7497 udelay(40);
7498 }
7499
7500 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7501 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7502 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7503 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7504 WDMAC_MODE_LNGREAD_ENAB);
7505
Michael Chan85e94ce2005-04-21 17:05:28 -07007506 /* If statement applies to 5705 and 5750 PCI devices only */
7507 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7508 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7509 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007510 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7511 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7512 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7513 /* nothing */
7514 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7515 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7516 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7517 val |= WDMAC_MODE_RX_ACCEL;
7518 }
7519 }
7520
Michael Chand9ab5ad2006-03-20 22:27:35 -08007521 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007522 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007523 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007524 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007525 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7526 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007527 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007528
Linus Torvalds1da177e2005-04-16 15:20:36 -07007529 tw32_f(WDMAC_MODE, val);
7530 udelay(40);
7531
Matt Carlson9974a352007-10-07 23:27:28 -07007532 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7533 u16 pcix_cmd;
7534
7535 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7536 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007537 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007538 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7539 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007540 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007541 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7542 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543 }
Matt Carlson9974a352007-10-07 23:27:28 -07007544 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7545 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007546 }
7547
7548 tw32_f(RDMAC_MODE, rdmac_mode);
7549 udelay(40);
7550
7551 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7552 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7553 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007554
7555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7556 tw32(SNDDATAC_MODE,
7557 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7558 else
7559 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7560
Linus Torvalds1da177e2005-04-16 15:20:36 -07007561 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7562 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7563 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7564 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007565 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7566 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007567 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7568 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7569
7570 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7571 err = tg3_load_5701_a0_firmware_fix(tp);
7572 if (err)
7573 return err;
7574 }
7575
Linus Torvalds1da177e2005-04-16 15:20:36 -07007576 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7577 err = tg3_load_tso_firmware(tp);
7578 if (err)
7579 return err;
7580 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007581
7582 tp->tx_mode = TX_MODE_ENABLE;
7583 tw32_f(MAC_TX_MODE, tp->tx_mode);
7584 udelay(100);
7585
7586 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007591 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7592
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 tw32_f(MAC_RX_MODE, tp->rx_mode);
7594 udelay(10);
7595
Linus Torvalds1da177e2005-04-16 15:20:36 -07007596 tw32(MAC_LED_CTRL, tp->led_ctrl);
7597
7598 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007599 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007600 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7601 udelay(10);
7602 }
7603 tw32_f(MAC_RX_MODE, tp->rx_mode);
7604 udelay(10);
7605
7606 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7607 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7608 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7609 /* Set drive transmission level to 1.2V */
7610 /* only if the signal pre-emphasis bit is not set */
7611 val = tr32(MAC_SERDES_CFG);
7612 val &= 0xfffff000;
7613 val |= 0x880;
7614 tw32(MAC_SERDES_CFG, val);
7615 }
7616 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7617 tw32(MAC_SERDES_CFG, 0x616000);
7618 }
7619
7620 /* Prevent chip from dropping frames when flow control
7621 * is enabled.
7622 */
7623 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7624
7625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7626 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7627 /* Use hardware link auto-negotiation */
7628 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7629 }
7630
Michael Chand4d2c552006-03-20 17:47:20 -08007631 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7632 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7633 u32 tmp;
7634
7635 tmp = tr32(SERDES_RX_CTRL);
7636 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7637 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7638 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7639 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7640 }
7641
Matt Carlsondd477002008-05-25 23:45:58 -07007642 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7643 if (tp->link_config.phy_is_low_power) {
7644 tp->link_config.phy_is_low_power = 0;
7645 tp->link_config.speed = tp->link_config.orig_speed;
7646 tp->link_config.duplex = tp->link_config.orig_duplex;
7647 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7648 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007649
Matt Carlsondd477002008-05-25 23:45:58 -07007650 err = tg3_setup_phy(tp, 0);
7651 if (err)
7652 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007653
Matt Carlsondd477002008-05-25 23:45:58 -07007654 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7655 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7656 u32 tmp;
7657
7658 /* Clear CRC stats. */
7659 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7660 tg3_writephy(tp, MII_TG3_TEST1,
7661 tmp | MII_TG3_TEST1_CRC_EN);
7662 tg3_readphy(tp, 0x14, &tmp);
7663 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007664 }
7665 }
7666
7667 __tg3_set_rx_mode(tp->dev);
7668
7669 /* Initialize receive rules. */
7670 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7671 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7672 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7673 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7674
Michael Chan4cf78e42005-07-25 12:29:19 -07007675 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007676 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007677 limit = 8;
7678 else
7679 limit = 16;
7680 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7681 limit -= 4;
7682 switch (limit) {
7683 case 16:
7684 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7685 case 15:
7686 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7687 case 14:
7688 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7689 case 13:
7690 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7691 case 12:
7692 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7693 case 11:
7694 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7695 case 10:
7696 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7697 case 9:
7698 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7699 case 8:
7700 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7701 case 7:
7702 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7703 case 6:
7704 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7705 case 5:
7706 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7707 case 4:
7708 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7709 case 3:
7710 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7711 case 2:
7712 case 1:
7713
7714 default:
7715 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007716 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007717
Matt Carlson9ce768e2007-10-11 19:49:11 -07007718 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7719 /* Write our heartbeat update interval to APE. */
7720 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7721 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007722
Linus Torvalds1da177e2005-04-16 15:20:36 -07007723 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7724
Linus Torvalds1da177e2005-04-16 15:20:36 -07007725 return 0;
7726}
7727
7728/* Called at device open time to get the chip ready for
7729 * packet processing. Invoked with tp->lock held.
7730 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007731static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007732{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007733 tg3_switch_clocks(tp);
7734
7735 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7736
Matt Carlson2f751b62008-08-04 23:17:34 -07007737 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007738}
7739
7740#define TG3_STAT_ADD32(PSTAT, REG) \
7741do { u32 __val = tr32(REG); \
7742 (PSTAT)->low += __val; \
7743 if ((PSTAT)->low < __val) \
7744 (PSTAT)->high += 1; \
7745} while (0)
7746
7747static void tg3_periodic_fetch_stats(struct tg3 *tp)
7748{
7749 struct tg3_hw_stats *sp = tp->hw_stats;
7750
7751 if (!netif_carrier_ok(tp->dev))
7752 return;
7753
7754 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7755 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7756 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7757 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7758 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7759 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7760 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7761 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7762 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7763 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7764 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7765 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7766 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7767
7768 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7769 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7770 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7771 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7772 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7773 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7774 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7775 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7776 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7777 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7778 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7779 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7780 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7781 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007782
7783 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7784 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7785 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007786}
7787
7788static void tg3_timer(unsigned long __opaque)
7789{
7790 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007791
Michael Chanf475f162006-03-27 23:20:14 -08007792 if (tp->irq_sync)
7793 goto restart_timer;
7794
David S. Millerf47c11e2005-06-24 20:18:35 -07007795 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007796
David S. Millerfac9b832005-05-18 22:46:34 -07007797 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7798 /* All of this garbage is because when using non-tagged
7799 * IRQ status the mailbox/status_block protocol the chip
7800 * uses with the cpu is race prone.
7801 */
7802 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7803 tw32(GRC_LOCAL_CTRL,
7804 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7805 } else {
7806 tw32(HOSTCC_MODE, tp->coalesce_mode |
7807 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7808 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007809
David S. Millerfac9b832005-05-18 22:46:34 -07007810 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7811 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007812 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007813 schedule_work(&tp->reset_task);
7814 return;
7815 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816 }
7817
Linus Torvalds1da177e2005-04-16 15:20:36 -07007818 /* This part only runs once per second. */
7819 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007820 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7821 tg3_periodic_fetch_stats(tp);
7822
Linus Torvalds1da177e2005-04-16 15:20:36 -07007823 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7824 u32 mac_stat;
7825 int phy_event;
7826
7827 mac_stat = tr32(MAC_STATUS);
7828
7829 phy_event = 0;
7830 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7831 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7832 phy_event = 1;
7833 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7834 phy_event = 1;
7835
7836 if (phy_event)
7837 tg3_setup_phy(tp, 0);
7838 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7839 u32 mac_stat = tr32(MAC_STATUS);
7840 int need_setup = 0;
7841
7842 if (netif_carrier_ok(tp->dev) &&
7843 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7844 need_setup = 1;
7845 }
7846 if (! netif_carrier_ok(tp->dev) &&
7847 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7848 MAC_STATUS_SIGNAL_DET))) {
7849 need_setup = 1;
7850 }
7851 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007852 if (!tp->serdes_counter) {
7853 tw32_f(MAC_MODE,
7854 (tp->mac_mode &
7855 ~MAC_MODE_PORT_MODE_MASK));
7856 udelay(40);
7857 tw32_f(MAC_MODE, tp->mac_mode);
7858 udelay(40);
7859 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007860 tg3_setup_phy(tp, 0);
7861 }
Michael Chan747e8f82005-07-25 12:33:22 -07007862 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7863 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007864
7865 tp->timer_counter = tp->timer_multiplier;
7866 }
7867
Michael Chan130b8e42006-09-27 16:00:40 -07007868 /* Heartbeat is only sent once every 2 seconds.
7869 *
7870 * The heartbeat is to tell the ASF firmware that the host
7871 * driver is still alive. In the event that the OS crashes,
7872 * ASF needs to reset the hardware to free up the FIFO space
7873 * that may be filled with rx packets destined for the host.
7874 * If the FIFO is full, ASF will no longer function properly.
7875 *
7876 * Unintended resets have been reported on real time kernels
7877 * where the timer doesn't run on time. Netpoll will also have
7878 * same problem.
7879 *
7880 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7881 * to check the ring condition when the heartbeat is expiring
7882 * before doing the reset. This will prevent most unintended
7883 * resets.
7884 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007885 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007886 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7887 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007888 tg3_wait_for_event_ack(tp);
7889
Michael Chanbbadf502006-04-06 21:46:34 -07007890 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007891 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007892 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007893 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007894 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007895
7896 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007897 }
7898 tp->asf_counter = tp->asf_multiplier;
7899 }
7900
David S. Millerf47c11e2005-06-24 20:18:35 -07007901 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007902
Michael Chanf475f162006-03-27 23:20:14 -08007903restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007904 tp->timer.expires = jiffies + tp->timer_offset;
7905 add_timer(&tp->timer);
7906}
7907
Adrian Bunk81789ef2006-03-20 23:00:14 -08007908static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007909{
David Howells7d12e782006-10-05 14:55:46 +01007910 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007911 unsigned long flags;
7912 struct net_device *dev = tp->dev;
7913
7914 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7915 fn = tg3_msi;
7916 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7917 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007918 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007919 } else {
7920 fn = tg3_interrupt;
7921 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7922 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007923 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007924 }
7925 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7926}
7927
Michael Chan79381092005-04-21 17:13:59 -07007928static int tg3_test_interrupt(struct tg3 *tp)
7929{
7930 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07007931 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07007932
Michael Chand4bc3922005-05-29 14:59:20 -07007933 if (!netif_running(dev))
7934 return -ENODEV;
7935
Michael Chan79381092005-04-21 17:13:59 -07007936 tg3_disable_ints(tp);
7937
7938 free_irq(tp->pdev->irq, dev);
7939
7940 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007941 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07007942 if (err)
7943 return err;
7944
Michael Chan38f38432005-09-05 17:53:32 -07007945 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07007946 tg3_enable_ints(tp);
7947
7948 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7949 HOSTCC_MODE_NOW);
7950
7951 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07007952 u32 int_mbox, misc_host_ctrl;
7953
Michael Chan09ee9292005-08-09 20:17:00 -07007954 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7955 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07007956 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7957
7958 if ((int_mbox != 0) ||
7959 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7960 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07007961 break;
Michael Chanb16250e2006-09-27 16:10:14 -07007962 }
7963
Michael Chan79381092005-04-21 17:13:59 -07007964 msleep(10);
7965 }
7966
7967 tg3_disable_ints(tp);
7968
7969 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04007970
Michael Chanfcfa0a32006-03-20 22:28:41 -08007971 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007972
7973 if (err)
7974 return err;
7975
Michael Chanb16250e2006-09-27 16:10:14 -07007976 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07007977 return 0;
7978
7979 return -EIO;
7980}
7981
7982/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7983 * successfully restored
7984 */
7985static int tg3_test_msi(struct tg3 *tp)
7986{
7987 struct net_device *dev = tp->dev;
7988 int err;
7989 u16 pci_cmd;
7990
7991 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7992 return 0;
7993
7994 /* Turn off SERR reporting in case MSI terminates with Master
7995 * Abort.
7996 */
7997 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7998 pci_write_config_word(tp->pdev, PCI_COMMAND,
7999 pci_cmd & ~PCI_COMMAND_SERR);
8000
8001 err = tg3_test_interrupt(tp);
8002
8003 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8004
8005 if (!err)
8006 return 0;
8007
8008 /* other failures */
8009 if (err != -EIO)
8010 return err;
8011
8012 /* MSI test failed, go back to INTx mode */
8013 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8014 "switching to INTx mode. Please report this failure to "
8015 "the PCI maintainer and include system chipset information.\n",
8016 tp->dev->name);
8017
8018 free_irq(tp->pdev->irq, dev);
8019 pci_disable_msi(tp->pdev);
8020
8021 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8022
Michael Chanfcfa0a32006-03-20 22:28:41 -08008023 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008024 if (err)
8025 return err;
8026
8027 /* Need to reset the chip because the MSI cycle may have terminated
8028 * with Master Abort.
8029 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008030 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008031
Michael Chan944d9802005-05-29 14:57:48 -07008032 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008033 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008034
David S. Millerf47c11e2005-06-24 20:18:35 -07008035 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008036
8037 if (err)
8038 free_irq(tp->pdev->irq, dev);
8039
8040 return err;
8041}
8042
Linus Torvalds1da177e2005-04-16 15:20:36 -07008043static int tg3_open(struct net_device *dev)
8044{
8045 struct tg3 *tp = netdev_priv(dev);
8046 int err;
8047
Michael Chanc49a1562006-12-17 17:07:29 -08008048 netif_carrier_off(tp->dev);
8049
Michael Chanbc1c7562006-03-20 17:48:03 -08008050 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008051 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008052 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008053
8054 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008055
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056 tg3_disable_ints(tp);
8057 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8058
David S. Millerf47c11e2005-06-24 20:18:35 -07008059 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008060
8061 /* The placement of this call is tied
8062 * to the setup and use of Host TX descriptors.
8063 */
8064 err = tg3_alloc_consistent(tp);
8065 if (err)
8066 return err;
8067
Michael Chan7544b092007-05-05 13:08:32 -07008068 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008069 /* All MSI supporting chips should support tagged
8070 * status. Assert that this is the case.
8071 */
8072 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8073 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8074 "Not using MSI.\n", tp->dev->name);
8075 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008076 u32 msi_mode;
8077
8078 msi_mode = tr32(MSGINT_MODE);
8079 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8080 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8081 }
8082 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008083 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008084
8085 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008086 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8087 pci_disable_msi(tp->pdev);
8088 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008090 tg3_free_consistent(tp);
8091 return err;
8092 }
8093
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008094 napi_enable(&tp->napi);
8095
David S. Millerf47c11e2005-06-24 20:18:35 -07008096 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008097
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008098 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008099 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008100 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008101 tg3_free_rings(tp);
8102 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008103 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8104 tp->timer_offset = HZ;
8105 else
8106 tp->timer_offset = HZ / 10;
8107
8108 BUG_ON(tp->timer_offset > HZ);
8109 tp->timer_counter = tp->timer_multiplier =
8110 (HZ / tp->timer_offset);
8111 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008112 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008113
8114 init_timer(&tp->timer);
8115 tp->timer.expires = jiffies + tp->timer_offset;
8116 tp->timer.data = (unsigned long) tp;
8117 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008118 }
8119
David S. Millerf47c11e2005-06-24 20:18:35 -07008120 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008121
8122 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008123 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008124 free_irq(tp->pdev->irq, dev);
8125 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8126 pci_disable_msi(tp->pdev);
8127 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8128 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008129 tg3_free_consistent(tp);
8130 return err;
8131 }
8132
Michael Chan79381092005-04-21 17:13:59 -07008133 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8134 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008135
Michael Chan79381092005-04-21 17:13:59 -07008136 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008137 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008138
8139 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8140 pci_disable_msi(tp->pdev);
8141 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8142 }
Michael Chan944d9802005-05-29 14:57:48 -07008143 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008144 tg3_free_rings(tp);
8145 tg3_free_consistent(tp);
8146
David S. Millerf47c11e2005-06-24 20:18:35 -07008147 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008148
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008149 napi_disable(&tp->napi);
8150
Michael Chan79381092005-04-21 17:13:59 -07008151 return err;
8152 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008153
8154 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8155 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008156 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008157
Michael Chanb5d37722006-09-27 16:06:21 -07008158 tw32(PCIE_TRANSACTION_CFG,
8159 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008160 }
8161 }
Michael Chan79381092005-04-21 17:13:59 -07008162 }
8163
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008164 tg3_phy_start(tp);
8165
David S. Millerf47c11e2005-06-24 20:18:35 -07008166 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008167
Michael Chan79381092005-04-21 17:13:59 -07008168 add_timer(&tp->timer);
8169 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008170 tg3_enable_ints(tp);
8171
David S. Millerf47c11e2005-06-24 20:18:35 -07008172 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008173
8174 netif_start_queue(dev);
8175
8176 return 0;
8177}
8178
8179#if 0
8180/*static*/ void tg3_dump_state(struct tg3 *tp)
8181{
8182 u32 val32, val32_2, val32_3, val32_4, val32_5;
8183 u16 val16;
8184 int i;
8185
8186 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8187 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8188 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8189 val16, val32);
8190
8191 /* MAC block */
8192 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8193 tr32(MAC_MODE), tr32(MAC_STATUS));
8194 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8195 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8196 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8197 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8198 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8199 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8200
8201 /* Send data initiator control block */
8202 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8203 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8204 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8205 tr32(SNDDATAI_STATSCTRL));
8206
8207 /* Send data completion control block */
8208 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8209
8210 /* Send BD ring selector block */
8211 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8212 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8213
8214 /* Send BD initiator control block */
8215 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8216 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8217
8218 /* Send BD completion control block */
8219 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8220
8221 /* Receive list placement control block */
8222 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8223 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8224 printk(" RCVLPC_STATSCTRL[%08x]\n",
8225 tr32(RCVLPC_STATSCTRL));
8226
8227 /* Receive data and receive BD initiator control block */
8228 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8229 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8230
8231 /* Receive data completion control block */
8232 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8233 tr32(RCVDCC_MODE));
8234
8235 /* Receive BD initiator control block */
8236 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8237 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8238
8239 /* Receive BD completion control block */
8240 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8241 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8242
8243 /* Receive list selector control block */
8244 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8245 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8246
8247 /* Mbuf cluster free block */
8248 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8249 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8250
8251 /* Host coalescing control block */
8252 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8253 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8254 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8255 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8256 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8257 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8258 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8259 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8260 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8261 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8262 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8263 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8264
8265 /* Memory arbiter control block */
8266 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8267 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8268
8269 /* Buffer manager control block */
8270 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8271 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8272 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8273 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8274 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8275 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8276 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8277 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8278
8279 /* Read DMA control block */
8280 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8281 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8282
8283 /* Write DMA control block */
8284 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8285 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8286
8287 /* DMA completion block */
8288 printk("DEBUG: DMAC_MODE[%08x]\n",
8289 tr32(DMAC_MODE));
8290
8291 /* GRC block */
8292 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8293 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8294 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8295 tr32(GRC_LOCAL_CTRL));
8296
8297 /* TG3_BDINFOs */
8298 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8299 tr32(RCVDBDI_JUMBO_BD + 0x0),
8300 tr32(RCVDBDI_JUMBO_BD + 0x4),
8301 tr32(RCVDBDI_JUMBO_BD + 0x8),
8302 tr32(RCVDBDI_JUMBO_BD + 0xc));
8303 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8304 tr32(RCVDBDI_STD_BD + 0x0),
8305 tr32(RCVDBDI_STD_BD + 0x4),
8306 tr32(RCVDBDI_STD_BD + 0x8),
8307 tr32(RCVDBDI_STD_BD + 0xc));
8308 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8309 tr32(RCVDBDI_MINI_BD + 0x0),
8310 tr32(RCVDBDI_MINI_BD + 0x4),
8311 tr32(RCVDBDI_MINI_BD + 0x8),
8312 tr32(RCVDBDI_MINI_BD + 0xc));
8313
8314 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8315 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8316 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8317 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8318 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8319 val32, val32_2, val32_3, val32_4);
8320
8321 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8322 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8323 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8324 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8325 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8326 val32, val32_2, val32_3, val32_4);
8327
8328 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8329 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8330 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8331 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8332 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8333 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8334 val32, val32_2, val32_3, val32_4, val32_5);
8335
8336 /* SW status block */
8337 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8338 tp->hw_status->status,
8339 tp->hw_status->status_tag,
8340 tp->hw_status->rx_jumbo_consumer,
8341 tp->hw_status->rx_consumer,
8342 tp->hw_status->rx_mini_consumer,
8343 tp->hw_status->idx[0].rx_producer,
8344 tp->hw_status->idx[0].tx_consumer);
8345
8346 /* SW statistics block */
8347 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8348 ((u32 *)tp->hw_stats)[0],
8349 ((u32 *)tp->hw_stats)[1],
8350 ((u32 *)tp->hw_stats)[2],
8351 ((u32 *)tp->hw_stats)[3]);
8352
8353 /* Mailboxes */
8354 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008355 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8356 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8357 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8358 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008359
8360 /* NIC side send descriptors. */
8361 for (i = 0; i < 6; i++) {
8362 unsigned long txd;
8363
8364 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8365 + (i * sizeof(struct tg3_tx_buffer_desc));
8366 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8367 i,
8368 readl(txd + 0x0), readl(txd + 0x4),
8369 readl(txd + 0x8), readl(txd + 0xc));
8370 }
8371
8372 /* NIC side RX descriptors. */
8373 for (i = 0; i < 6; i++) {
8374 unsigned long rxd;
8375
8376 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8377 + (i * sizeof(struct tg3_rx_buffer_desc));
8378 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8379 i,
8380 readl(rxd + 0x0), readl(rxd + 0x4),
8381 readl(rxd + 0x8), readl(rxd + 0xc));
8382 rxd += (4 * sizeof(u32));
8383 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8384 i,
8385 readl(rxd + 0x0), readl(rxd + 0x4),
8386 readl(rxd + 0x8), readl(rxd + 0xc));
8387 }
8388
8389 for (i = 0; i < 6; i++) {
8390 unsigned long rxd;
8391
8392 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8393 + (i * sizeof(struct tg3_rx_buffer_desc));
8394 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8395 i,
8396 readl(rxd + 0x0), readl(rxd + 0x4),
8397 readl(rxd + 0x8), readl(rxd + 0xc));
8398 rxd += (4 * sizeof(u32));
8399 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8400 i,
8401 readl(rxd + 0x0), readl(rxd + 0x4),
8402 readl(rxd + 0x8), readl(rxd + 0xc));
8403 }
8404}
8405#endif
8406
8407static struct net_device_stats *tg3_get_stats(struct net_device *);
8408static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8409
8410static int tg3_close(struct net_device *dev)
8411{
8412 struct tg3 *tp = netdev_priv(dev);
8413
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008414 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008415 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008416
Linus Torvalds1da177e2005-04-16 15:20:36 -07008417 netif_stop_queue(dev);
8418
8419 del_timer_sync(&tp->timer);
8420
David S. Millerf47c11e2005-06-24 20:18:35 -07008421 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008422#if 0
8423 tg3_dump_state(tp);
8424#endif
8425
8426 tg3_disable_ints(tp);
8427
Michael Chan944d9802005-05-29 14:57:48 -07008428 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008429 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008430 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008431
David S. Millerf47c11e2005-06-24 20:18:35 -07008432 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008433
Michael Chan88b06bc2005-04-21 17:13:25 -07008434 free_irq(tp->pdev->irq, dev);
8435 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8436 pci_disable_msi(tp->pdev);
8437 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008439
8440 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8441 sizeof(tp->net_stats_prev));
8442 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8443 sizeof(tp->estats_prev));
8444
8445 tg3_free_consistent(tp);
8446
Michael Chanbc1c7562006-03-20 17:48:03 -08008447 tg3_set_power_state(tp, PCI_D3hot);
8448
8449 netif_carrier_off(tp->dev);
8450
Linus Torvalds1da177e2005-04-16 15:20:36 -07008451 return 0;
8452}
8453
8454static inline unsigned long get_stat64(tg3_stat64_t *val)
8455{
8456 unsigned long ret;
8457
8458#if (BITS_PER_LONG == 32)
8459 ret = val->low;
8460#else
8461 ret = ((u64)val->high << 32) | ((u64)val->low);
8462#endif
8463 return ret;
8464}
8465
Stefan Buehler816f8b82008-08-15 14:10:54 -07008466static inline u64 get_estat64(tg3_stat64_t *val)
8467{
8468 return ((u64)val->high << 32) | ((u64)val->low);
8469}
8470
Linus Torvalds1da177e2005-04-16 15:20:36 -07008471static unsigned long calc_crc_errors(struct tg3 *tp)
8472{
8473 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8474
8475 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8476 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008478 u32 val;
8479
David S. Millerf47c11e2005-06-24 20:18:35 -07008480 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008481 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8482 tg3_writephy(tp, MII_TG3_TEST1,
8483 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484 tg3_readphy(tp, 0x14, &val);
8485 } else
8486 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008487 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008488
8489 tp->phy_crc_errors += val;
8490
8491 return tp->phy_crc_errors;
8492 }
8493
8494 return get_stat64(&hw_stats->rx_fcs_errors);
8495}
8496
8497#define ESTAT_ADD(member) \
8498 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008499 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008500
8501static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8502{
8503 struct tg3_ethtool_stats *estats = &tp->estats;
8504 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8505 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8506
8507 if (!hw_stats)
8508 return old_estats;
8509
8510 ESTAT_ADD(rx_octets);
8511 ESTAT_ADD(rx_fragments);
8512 ESTAT_ADD(rx_ucast_packets);
8513 ESTAT_ADD(rx_mcast_packets);
8514 ESTAT_ADD(rx_bcast_packets);
8515 ESTAT_ADD(rx_fcs_errors);
8516 ESTAT_ADD(rx_align_errors);
8517 ESTAT_ADD(rx_xon_pause_rcvd);
8518 ESTAT_ADD(rx_xoff_pause_rcvd);
8519 ESTAT_ADD(rx_mac_ctrl_rcvd);
8520 ESTAT_ADD(rx_xoff_entered);
8521 ESTAT_ADD(rx_frame_too_long_errors);
8522 ESTAT_ADD(rx_jabbers);
8523 ESTAT_ADD(rx_undersize_packets);
8524 ESTAT_ADD(rx_in_length_errors);
8525 ESTAT_ADD(rx_out_length_errors);
8526 ESTAT_ADD(rx_64_or_less_octet_packets);
8527 ESTAT_ADD(rx_65_to_127_octet_packets);
8528 ESTAT_ADD(rx_128_to_255_octet_packets);
8529 ESTAT_ADD(rx_256_to_511_octet_packets);
8530 ESTAT_ADD(rx_512_to_1023_octet_packets);
8531 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8532 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8533 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8534 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8535 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8536
8537 ESTAT_ADD(tx_octets);
8538 ESTAT_ADD(tx_collisions);
8539 ESTAT_ADD(tx_xon_sent);
8540 ESTAT_ADD(tx_xoff_sent);
8541 ESTAT_ADD(tx_flow_control);
8542 ESTAT_ADD(tx_mac_errors);
8543 ESTAT_ADD(tx_single_collisions);
8544 ESTAT_ADD(tx_mult_collisions);
8545 ESTAT_ADD(tx_deferred);
8546 ESTAT_ADD(tx_excessive_collisions);
8547 ESTAT_ADD(tx_late_collisions);
8548 ESTAT_ADD(tx_collide_2times);
8549 ESTAT_ADD(tx_collide_3times);
8550 ESTAT_ADD(tx_collide_4times);
8551 ESTAT_ADD(tx_collide_5times);
8552 ESTAT_ADD(tx_collide_6times);
8553 ESTAT_ADD(tx_collide_7times);
8554 ESTAT_ADD(tx_collide_8times);
8555 ESTAT_ADD(tx_collide_9times);
8556 ESTAT_ADD(tx_collide_10times);
8557 ESTAT_ADD(tx_collide_11times);
8558 ESTAT_ADD(tx_collide_12times);
8559 ESTAT_ADD(tx_collide_13times);
8560 ESTAT_ADD(tx_collide_14times);
8561 ESTAT_ADD(tx_collide_15times);
8562 ESTAT_ADD(tx_ucast_packets);
8563 ESTAT_ADD(tx_mcast_packets);
8564 ESTAT_ADD(tx_bcast_packets);
8565 ESTAT_ADD(tx_carrier_sense_errors);
8566 ESTAT_ADD(tx_discards);
8567 ESTAT_ADD(tx_errors);
8568
8569 ESTAT_ADD(dma_writeq_full);
8570 ESTAT_ADD(dma_write_prioq_full);
8571 ESTAT_ADD(rxbds_empty);
8572 ESTAT_ADD(rx_discards);
8573 ESTAT_ADD(rx_errors);
8574 ESTAT_ADD(rx_threshold_hit);
8575
8576 ESTAT_ADD(dma_readq_full);
8577 ESTAT_ADD(dma_read_prioq_full);
8578 ESTAT_ADD(tx_comp_queue_full);
8579
8580 ESTAT_ADD(ring_set_send_prod_index);
8581 ESTAT_ADD(ring_status_update);
8582 ESTAT_ADD(nic_irqs);
8583 ESTAT_ADD(nic_avoided_irqs);
8584 ESTAT_ADD(nic_tx_threshold_hit);
8585
8586 return estats;
8587}
8588
8589static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8590{
8591 struct tg3 *tp = netdev_priv(dev);
8592 struct net_device_stats *stats = &tp->net_stats;
8593 struct net_device_stats *old_stats = &tp->net_stats_prev;
8594 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8595
8596 if (!hw_stats)
8597 return old_stats;
8598
8599 stats->rx_packets = old_stats->rx_packets +
8600 get_stat64(&hw_stats->rx_ucast_packets) +
8601 get_stat64(&hw_stats->rx_mcast_packets) +
8602 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008603
Linus Torvalds1da177e2005-04-16 15:20:36 -07008604 stats->tx_packets = old_stats->tx_packets +
8605 get_stat64(&hw_stats->tx_ucast_packets) +
8606 get_stat64(&hw_stats->tx_mcast_packets) +
8607 get_stat64(&hw_stats->tx_bcast_packets);
8608
8609 stats->rx_bytes = old_stats->rx_bytes +
8610 get_stat64(&hw_stats->rx_octets);
8611 stats->tx_bytes = old_stats->tx_bytes +
8612 get_stat64(&hw_stats->tx_octets);
8613
8614 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008615 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008616 stats->tx_errors = old_stats->tx_errors +
8617 get_stat64(&hw_stats->tx_errors) +
8618 get_stat64(&hw_stats->tx_mac_errors) +
8619 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8620 get_stat64(&hw_stats->tx_discards);
8621
8622 stats->multicast = old_stats->multicast +
8623 get_stat64(&hw_stats->rx_mcast_packets);
8624 stats->collisions = old_stats->collisions +
8625 get_stat64(&hw_stats->tx_collisions);
8626
8627 stats->rx_length_errors = old_stats->rx_length_errors +
8628 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8629 get_stat64(&hw_stats->rx_undersize_packets);
8630
8631 stats->rx_over_errors = old_stats->rx_over_errors +
8632 get_stat64(&hw_stats->rxbds_empty);
8633 stats->rx_frame_errors = old_stats->rx_frame_errors +
8634 get_stat64(&hw_stats->rx_align_errors);
8635 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8636 get_stat64(&hw_stats->tx_discards);
8637 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8638 get_stat64(&hw_stats->tx_carrier_sense_errors);
8639
8640 stats->rx_crc_errors = old_stats->rx_crc_errors +
8641 calc_crc_errors(tp);
8642
John W. Linville4f63b872005-09-12 14:43:18 -07008643 stats->rx_missed_errors = old_stats->rx_missed_errors +
8644 get_stat64(&hw_stats->rx_discards);
8645
Linus Torvalds1da177e2005-04-16 15:20:36 -07008646 return stats;
8647}
8648
8649static inline u32 calc_crc(unsigned char *buf, int len)
8650{
8651 u32 reg;
8652 u32 tmp;
8653 int j, k;
8654
8655 reg = 0xffffffff;
8656
8657 for (j = 0; j < len; j++) {
8658 reg ^= buf[j];
8659
8660 for (k = 0; k < 8; k++) {
8661 tmp = reg & 0x01;
8662
8663 reg >>= 1;
8664
8665 if (tmp) {
8666 reg ^= 0xedb88320;
8667 }
8668 }
8669 }
8670
8671 return ~reg;
8672}
8673
8674static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8675{
8676 /* accept or reject all multicast frames */
8677 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8678 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8679 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8680 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8681}
8682
8683static void __tg3_set_rx_mode(struct net_device *dev)
8684{
8685 struct tg3 *tp = netdev_priv(dev);
8686 u32 rx_mode;
8687
8688 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8689 RX_MODE_KEEP_VLAN_TAG);
8690
8691 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8692 * flag clear.
8693 */
8694#if TG3_VLAN_TAG_USED
8695 if (!tp->vlgrp &&
8696 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8697 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8698#else
8699 /* By definition, VLAN is disabled always in this
8700 * case.
8701 */
8702 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8703 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8704#endif
8705
8706 if (dev->flags & IFF_PROMISC) {
8707 /* Promiscuous mode. */
8708 rx_mode |= RX_MODE_PROMISC;
8709 } else if (dev->flags & IFF_ALLMULTI) {
8710 /* Accept all multicast. */
8711 tg3_set_multi (tp, 1);
8712 } else if (dev->mc_count < 1) {
8713 /* Reject all multicast. */
8714 tg3_set_multi (tp, 0);
8715 } else {
8716 /* Accept one or more multicast(s). */
8717 struct dev_mc_list *mclist;
8718 unsigned int i;
8719 u32 mc_filter[4] = { 0, };
8720 u32 regidx;
8721 u32 bit;
8722 u32 crc;
8723
8724 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8725 i++, mclist = mclist->next) {
8726
8727 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8728 bit = ~crc & 0x7f;
8729 regidx = (bit & 0x60) >> 5;
8730 bit &= 0x1f;
8731 mc_filter[regidx] |= (1 << bit);
8732 }
8733
8734 tw32(MAC_HASH_REG_0, mc_filter[0]);
8735 tw32(MAC_HASH_REG_1, mc_filter[1]);
8736 tw32(MAC_HASH_REG_2, mc_filter[2]);
8737 tw32(MAC_HASH_REG_3, mc_filter[3]);
8738 }
8739
8740 if (rx_mode != tp->rx_mode) {
8741 tp->rx_mode = rx_mode;
8742 tw32_f(MAC_RX_MODE, rx_mode);
8743 udelay(10);
8744 }
8745}
8746
8747static void tg3_set_rx_mode(struct net_device *dev)
8748{
8749 struct tg3 *tp = netdev_priv(dev);
8750
Michael Chane75f7c92006-03-20 21:33:26 -08008751 if (!netif_running(dev))
8752 return;
8753
David S. Millerf47c11e2005-06-24 20:18:35 -07008754 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008755 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008756 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008757}
8758
8759#define TG3_REGDUMP_LEN (32 * 1024)
8760
8761static int tg3_get_regs_len(struct net_device *dev)
8762{
8763 return TG3_REGDUMP_LEN;
8764}
8765
8766static void tg3_get_regs(struct net_device *dev,
8767 struct ethtool_regs *regs, void *_p)
8768{
8769 u32 *p = _p;
8770 struct tg3 *tp = netdev_priv(dev);
8771 u8 *orig_p = _p;
8772 int i;
8773
8774 regs->version = 0;
8775
8776 memset(p, 0, TG3_REGDUMP_LEN);
8777
Michael Chanbc1c7562006-03-20 17:48:03 -08008778 if (tp->link_config.phy_is_low_power)
8779 return;
8780
David S. Millerf47c11e2005-06-24 20:18:35 -07008781 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008782
8783#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8784#define GET_REG32_LOOP(base,len) \
8785do { p = (u32 *)(orig_p + (base)); \
8786 for (i = 0; i < len; i += 4) \
8787 __GET_REG32((base) + i); \
8788} while (0)
8789#define GET_REG32_1(reg) \
8790do { p = (u32 *)(orig_p + (reg)); \
8791 __GET_REG32((reg)); \
8792} while (0)
8793
8794 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8795 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8796 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8797 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8798 GET_REG32_1(SNDDATAC_MODE);
8799 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8800 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8801 GET_REG32_1(SNDBDC_MODE);
8802 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8803 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8804 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8805 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8806 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8807 GET_REG32_1(RCVDCC_MODE);
8808 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8809 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8810 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8811 GET_REG32_1(MBFREE_MODE);
8812 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8813 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8814 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8815 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8816 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008817 GET_REG32_1(RX_CPU_MODE);
8818 GET_REG32_1(RX_CPU_STATE);
8819 GET_REG32_1(RX_CPU_PGMCTR);
8820 GET_REG32_1(RX_CPU_HWBKPT);
8821 GET_REG32_1(TX_CPU_MODE);
8822 GET_REG32_1(TX_CPU_STATE);
8823 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008824 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8825 GET_REG32_LOOP(FTQ_RESET, 0x120);
8826 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8827 GET_REG32_1(DMAC_MODE);
8828 GET_REG32_LOOP(GRC_MODE, 0x4c);
8829 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8830 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8831
8832#undef __GET_REG32
8833#undef GET_REG32_LOOP
8834#undef GET_REG32_1
8835
David S. Millerf47c11e2005-06-24 20:18:35 -07008836 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008837}
8838
8839static int tg3_get_eeprom_len(struct net_device *dev)
8840{
8841 struct tg3 *tp = netdev_priv(dev);
8842
8843 return tp->nvram_size;
8844}
8845
8846static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008847static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008848static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008849
8850static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8851{
8852 struct tg3 *tp = netdev_priv(dev);
8853 int ret;
8854 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008855 u32 i, offset, len, b_offset, b_count;
8856 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008857
Michael Chanbc1c7562006-03-20 17:48:03 -08008858 if (tp->link_config.phy_is_low_power)
8859 return -EAGAIN;
8860
Linus Torvalds1da177e2005-04-16 15:20:36 -07008861 offset = eeprom->offset;
8862 len = eeprom->len;
8863 eeprom->len = 0;
8864
8865 eeprom->magic = TG3_EEPROM_MAGIC;
8866
8867 if (offset & 3) {
8868 /* adjustments to start on required 4 byte boundary */
8869 b_offset = offset & 3;
8870 b_count = 4 - b_offset;
8871 if (b_count > len) {
8872 /* i.e. offset=1 len=2 */
8873 b_count = len;
8874 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008875 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008876 if (ret)
8877 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008878 memcpy(data, ((char*)&val) + b_offset, b_count);
8879 len -= b_count;
8880 offset += b_count;
8881 eeprom->len += b_count;
8882 }
8883
8884 /* read bytes upto the last 4 byte boundary */
8885 pd = &data[eeprom->len];
8886 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008887 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008888 if (ret) {
8889 eeprom->len += i;
8890 return ret;
8891 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008892 memcpy(pd + i, &val, 4);
8893 }
8894 eeprom->len += i;
8895
8896 if (len & 3) {
8897 /* read last bytes not ending on 4 byte boundary */
8898 pd = &data[eeprom->len];
8899 b_count = len & 3;
8900 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008901 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008902 if (ret)
8903 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008904 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008905 eeprom->len += b_count;
8906 }
8907 return 0;
8908}
8909
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008910static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008911
8912static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8913{
8914 struct tg3 *tp = netdev_priv(dev);
8915 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008916 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008917 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08008918 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008919
Michael Chanbc1c7562006-03-20 17:48:03 -08008920 if (tp->link_config.phy_is_low_power)
8921 return -EAGAIN;
8922
Linus Torvalds1da177e2005-04-16 15:20:36 -07008923 if (eeprom->magic != TG3_EEPROM_MAGIC)
8924 return -EINVAL;
8925
8926 offset = eeprom->offset;
8927 len = eeprom->len;
8928
8929 if ((b_offset = (offset & 3))) {
8930 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08008931 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008932 if (ret)
8933 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008934 len += b_offset;
8935 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07008936 if (len < 4)
8937 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008938 }
8939
8940 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07008941 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008942 /* adjustments to end on required 4 byte boundary */
8943 odd_len = 1;
8944 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08008945 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008946 if (ret)
8947 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008948 }
8949
8950 buf = data;
8951 if (b_offset || odd_len) {
8952 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008953 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008954 return -ENOMEM;
8955 if (b_offset)
8956 memcpy(buf, &start, 4);
8957 if (odd_len)
8958 memcpy(buf+len-4, &end, 4);
8959 memcpy(buf + b_offset, data, eeprom->len);
8960 }
8961
8962 ret = tg3_nvram_write_block(tp, offset, len, buf);
8963
8964 if (buf != data)
8965 kfree(buf);
8966
8967 return ret;
8968}
8969
8970static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8971{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008972 struct tg3 *tp = netdev_priv(dev);
8973
8974 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8975 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8976 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07008977 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008978 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008979
Linus Torvalds1da177e2005-04-16 15:20:36 -07008980 cmd->supported = (SUPPORTED_Autoneg);
8981
8982 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8983 cmd->supported |= (SUPPORTED_1000baseT_Half |
8984 SUPPORTED_1000baseT_Full);
8985
Karsten Keilef348142006-05-12 12:49:08 -07008986 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008987 cmd->supported |= (SUPPORTED_100baseT_Half |
8988 SUPPORTED_100baseT_Full |
8989 SUPPORTED_10baseT_Half |
8990 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08008991 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07008992 cmd->port = PORT_TP;
8993 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008994 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07008995 cmd->port = PORT_FIBRE;
8996 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008997
Linus Torvalds1da177e2005-04-16 15:20:36 -07008998 cmd->advertising = tp->link_config.advertising;
8999 if (netif_running(dev)) {
9000 cmd->speed = tp->link_config.active_speed;
9001 cmd->duplex = tp->link_config.active_duplex;
9002 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009003 cmd->phy_address = PHY_ADDR;
9004 cmd->transceiver = 0;
9005 cmd->autoneg = tp->link_config.autoneg;
9006 cmd->maxtxpkt = 0;
9007 cmd->maxrxpkt = 0;
9008 return 0;
9009}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009010
Linus Torvalds1da177e2005-04-16 15:20:36 -07009011static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9012{
9013 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009014
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009015 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9016 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9017 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009018 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009019 }
9020
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009021 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009022 /* These are the only valid advertisement bits allowed. */
9023 if (cmd->autoneg == AUTONEG_ENABLE &&
9024 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9025 ADVERTISED_1000baseT_Full |
9026 ADVERTISED_Autoneg |
9027 ADVERTISED_FIBRE)))
9028 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009029 /* Fiber can only do SPEED_1000. */
9030 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9031 (cmd->speed != SPEED_1000))
9032 return -EINVAL;
9033 /* Copper cannot force SPEED_1000. */
9034 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9035 (cmd->speed == SPEED_1000))
9036 return -EINVAL;
9037 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009038 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009039 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009040
David S. Millerf47c11e2005-06-24 20:18:35 -07009041 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009042
9043 tp->link_config.autoneg = cmd->autoneg;
9044 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009045 tp->link_config.advertising = (cmd->advertising |
9046 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009047 tp->link_config.speed = SPEED_INVALID;
9048 tp->link_config.duplex = DUPLEX_INVALID;
9049 } else {
9050 tp->link_config.advertising = 0;
9051 tp->link_config.speed = cmd->speed;
9052 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009053 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009054
Michael Chan24fcad62006-12-17 17:06:46 -08009055 tp->link_config.orig_speed = tp->link_config.speed;
9056 tp->link_config.orig_duplex = tp->link_config.duplex;
9057 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9058
Linus Torvalds1da177e2005-04-16 15:20:36 -07009059 if (netif_running(dev))
9060 tg3_setup_phy(tp, 1);
9061
David S. Millerf47c11e2005-06-24 20:18:35 -07009062 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009063
Linus Torvalds1da177e2005-04-16 15:20:36 -07009064 return 0;
9065}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009066
Linus Torvalds1da177e2005-04-16 15:20:36 -07009067static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9068{
9069 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009070
Linus Torvalds1da177e2005-04-16 15:20:36 -07009071 strcpy(info->driver, DRV_MODULE_NAME);
9072 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009073 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009074 strcpy(info->bus_info, pci_name(tp->pdev));
9075}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009076
Linus Torvalds1da177e2005-04-16 15:20:36 -07009077static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9078{
9079 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009080
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009081 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9082 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009083 wol->supported = WAKE_MAGIC;
9084 else
9085 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009086 wol->wolopts = 0;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08009087 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9088 device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009089 wol->wolopts = WAKE_MAGIC;
9090 memset(&wol->sopass, 0, sizeof(wol->sopass));
9091}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009092
Linus Torvalds1da177e2005-04-16 15:20:36 -07009093static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9094{
9095 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009096 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009097
Linus Torvalds1da177e2005-04-16 15:20:36 -07009098 if (wol->wolopts & ~WAKE_MAGIC)
9099 return -EINVAL;
9100 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009101 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009102 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009103
David S. Millerf47c11e2005-06-24 20:18:35 -07009104 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009105 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009106 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009107 device_set_wakeup_enable(dp, true);
9108 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009109 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009110 device_set_wakeup_enable(dp, false);
9111 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009112 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009113
Linus Torvalds1da177e2005-04-16 15:20:36 -07009114 return 0;
9115}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009116
Linus Torvalds1da177e2005-04-16 15:20:36 -07009117static u32 tg3_get_msglevel(struct net_device *dev)
9118{
9119 struct tg3 *tp = netdev_priv(dev);
9120 return tp->msg_enable;
9121}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009122
Linus Torvalds1da177e2005-04-16 15:20:36 -07009123static void tg3_set_msglevel(struct net_device *dev, u32 value)
9124{
9125 struct tg3 *tp = netdev_priv(dev);
9126 tp->msg_enable = value;
9127}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009128
Linus Torvalds1da177e2005-04-16 15:20:36 -07009129static int tg3_set_tso(struct net_device *dev, u32 value)
9130{
9131 struct tg3 *tp = netdev_priv(dev);
9132
9133 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9134 if (value)
9135 return -EINVAL;
9136 return 0;
9137 }
Michael Chanb5d37722006-09-27 16:06:21 -07009138 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9139 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009140 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009141 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9143 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9144 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009146 dev->features |= NETIF_F_TSO_ECN;
9147 } else
9148 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009149 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009150 return ethtool_op_set_tso(dev, value);
9151}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009152
Linus Torvalds1da177e2005-04-16 15:20:36 -07009153static int tg3_nway_reset(struct net_device *dev)
9154{
9155 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009156 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009157
Linus Torvalds1da177e2005-04-16 15:20:36 -07009158 if (!netif_running(dev))
9159 return -EAGAIN;
9160
Michael Chanc94e3942005-09-27 12:12:42 -07009161 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9162 return -EINVAL;
9163
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009164 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9165 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9166 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009167 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009168 } else {
9169 u32 bmcr;
9170
9171 spin_lock_bh(&tp->lock);
9172 r = -EINVAL;
9173 tg3_readphy(tp, MII_BMCR, &bmcr);
9174 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9175 ((bmcr & BMCR_ANENABLE) ||
9176 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9177 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9178 BMCR_ANENABLE);
9179 r = 0;
9180 }
9181 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009182 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009183
Linus Torvalds1da177e2005-04-16 15:20:36 -07009184 return r;
9185}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009186
Linus Torvalds1da177e2005-04-16 15:20:36 -07009187static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9188{
9189 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009190
Linus Torvalds1da177e2005-04-16 15:20:36 -07009191 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9192 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009193 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9194 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9195 else
9196 ering->rx_jumbo_max_pending = 0;
9197
9198 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009199
9200 ering->rx_pending = tp->rx_pending;
9201 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009202 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9203 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9204 else
9205 ering->rx_jumbo_pending = 0;
9206
Linus Torvalds1da177e2005-04-16 15:20:36 -07009207 ering->tx_pending = tp->tx_pending;
9208}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009209
Linus Torvalds1da177e2005-04-16 15:20:36 -07009210static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9211{
9212 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009213 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009214
Linus Torvalds1da177e2005-04-16 15:20:36 -07009215 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9216 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009217 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9218 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009219 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009220 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009221 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009222
Michael Chanbbe832c2005-06-24 20:20:04 -07009223 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009224 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009225 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009226 irq_sync = 1;
9227 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009228
Michael Chanbbe832c2005-06-24 20:20:04 -07009229 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009230
Linus Torvalds1da177e2005-04-16 15:20:36 -07009231 tp->rx_pending = ering->rx_pending;
9232
9233 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9234 tp->rx_pending > 63)
9235 tp->rx_pending = 63;
9236 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9237 tp->tx_pending = ering->tx_pending;
9238
9239 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009240 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009241 err = tg3_restart_hw(tp, 1);
9242 if (!err)
9243 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009244 }
9245
David S. Millerf47c11e2005-06-24 20:18:35 -07009246 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009247
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009248 if (irq_sync && !err)
9249 tg3_phy_start(tp);
9250
Michael Chanb9ec6c12006-07-25 16:37:27 -07009251 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009252}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009253
Linus Torvalds1da177e2005-04-16 15:20:36 -07009254static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9255{
9256 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009257
Linus Torvalds1da177e2005-04-16 15:20:36 -07009258 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009259
9260 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9261 epause->rx_pause = 1;
9262 else
9263 epause->rx_pause = 0;
9264
9265 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9266 epause->tx_pause = 1;
9267 else
9268 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009269}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009270
Linus Torvalds1da177e2005-04-16 15:20:36 -07009271static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9272{
9273 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009274 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009275
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009276 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9277 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9278 return -EAGAIN;
9279
9280 if (epause->autoneg) {
9281 u32 newadv;
9282 struct phy_device *phydev;
9283
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009284 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009285
9286 if (epause->rx_pause) {
9287 if (epause->tx_pause)
9288 newadv = ADVERTISED_Pause;
9289 else
9290 newadv = ADVERTISED_Pause |
9291 ADVERTISED_Asym_Pause;
9292 } else if (epause->tx_pause) {
9293 newadv = ADVERTISED_Asym_Pause;
9294 } else
9295 newadv = 0;
9296
9297 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9298 u32 oldadv = phydev->advertising &
9299 (ADVERTISED_Pause |
9300 ADVERTISED_Asym_Pause);
9301 if (oldadv != newadv) {
9302 phydev->advertising &=
9303 ~(ADVERTISED_Pause |
9304 ADVERTISED_Asym_Pause);
9305 phydev->advertising |= newadv;
9306 err = phy_start_aneg(phydev);
9307 }
9308 } else {
9309 tp->link_config.advertising &=
9310 ~(ADVERTISED_Pause |
9311 ADVERTISED_Asym_Pause);
9312 tp->link_config.advertising |= newadv;
9313 }
9314 } else {
9315 if (epause->rx_pause)
9316 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9317 else
9318 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9319
9320 if (epause->tx_pause)
9321 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9322 else
9323 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9324
9325 if (netif_running(dev))
9326 tg3_setup_flow_control(tp, 0, 0);
9327 }
9328 } else {
9329 int irq_sync = 0;
9330
9331 if (netif_running(dev)) {
9332 tg3_netif_stop(tp);
9333 irq_sync = 1;
9334 }
9335
9336 tg3_full_lock(tp, irq_sync);
9337
9338 if (epause->autoneg)
9339 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9340 else
9341 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9342 if (epause->rx_pause)
9343 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9344 else
9345 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9346 if (epause->tx_pause)
9347 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9348 else
9349 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9350
9351 if (netif_running(dev)) {
9352 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9353 err = tg3_restart_hw(tp, 1);
9354 if (!err)
9355 tg3_netif_start(tp);
9356 }
9357
9358 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009360
Michael Chanb9ec6c12006-07-25 16:37:27 -07009361 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009362}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009363
Linus Torvalds1da177e2005-04-16 15:20:36 -07009364static u32 tg3_get_rx_csum(struct net_device *dev)
9365{
9366 struct tg3 *tp = netdev_priv(dev);
9367 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9368}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009369
Linus Torvalds1da177e2005-04-16 15:20:36 -07009370static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9371{
9372 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009373
Linus Torvalds1da177e2005-04-16 15:20:36 -07009374 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9375 if (data != 0)
9376 return -EINVAL;
9377 return 0;
9378 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009379
David S. Millerf47c11e2005-06-24 20:18:35 -07009380 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009381 if (data)
9382 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9383 else
9384 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009385 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009386
Linus Torvalds1da177e2005-04-16 15:20:36 -07009387 return 0;
9388}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009389
Linus Torvalds1da177e2005-04-16 15:20:36 -07009390static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9391{
9392 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009393
Linus Torvalds1da177e2005-04-16 15:20:36 -07009394 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9395 if (data != 0)
9396 return -EINVAL;
9397 return 0;
9398 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009399
Michael Chanaf36e6b2006-03-23 01:28:06 -08009400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009405 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009406 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009407 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009408
9409 return 0;
9410}
9411
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009412static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009413{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009414 switch (sset) {
9415 case ETH_SS_TEST:
9416 return TG3_NUM_TEST;
9417 case ETH_SS_STATS:
9418 return TG3_NUM_STATS;
9419 default:
9420 return -EOPNOTSUPP;
9421 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009422}
9423
Linus Torvalds1da177e2005-04-16 15:20:36 -07009424static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9425{
9426 switch (stringset) {
9427 case ETH_SS_STATS:
9428 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9429 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009430 case ETH_SS_TEST:
9431 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9432 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009433 default:
9434 WARN_ON(1); /* we need a WARN() */
9435 break;
9436 }
9437}
9438
Michael Chan4009a932005-09-05 17:52:54 -07009439static int tg3_phys_id(struct net_device *dev, u32 data)
9440{
9441 struct tg3 *tp = netdev_priv(dev);
9442 int i;
9443
9444 if (!netif_running(tp->dev))
9445 return -EAGAIN;
9446
9447 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009448 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009449
9450 for (i = 0; i < (data * 2); i++) {
9451 if ((i % 2) == 0)
9452 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9453 LED_CTRL_1000MBPS_ON |
9454 LED_CTRL_100MBPS_ON |
9455 LED_CTRL_10MBPS_ON |
9456 LED_CTRL_TRAFFIC_OVERRIDE |
9457 LED_CTRL_TRAFFIC_BLINK |
9458 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009459
Michael Chan4009a932005-09-05 17:52:54 -07009460 else
9461 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9462 LED_CTRL_TRAFFIC_OVERRIDE);
9463
9464 if (msleep_interruptible(500))
9465 break;
9466 }
9467 tw32(MAC_LED_CTRL, tp->led_ctrl);
9468 return 0;
9469}
9470
Linus Torvalds1da177e2005-04-16 15:20:36 -07009471static void tg3_get_ethtool_stats (struct net_device *dev,
9472 struct ethtool_stats *estats, u64 *tmp_stats)
9473{
9474 struct tg3 *tp = netdev_priv(dev);
9475 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9476}
9477
Michael Chan566f86a2005-05-29 14:56:58 -07009478#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009479#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9480#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9481#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009482#define NVRAM_SELFBOOT_HW_SIZE 0x20
9483#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009484
9485static int tg3_test_nvram(struct tg3 *tp)
9486{
Al Virob9fc7dc2007-12-17 22:59:57 -08009487 u32 csum, magic;
9488 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009489 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009490
Michael Chan18201802006-03-20 22:29:15 -08009491 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009492 return -EIO;
9493
Michael Chan1b277772006-03-20 22:27:48 -08009494 if (magic == TG3_EEPROM_MAGIC)
9495 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009496 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009497 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9498 TG3_EEPROM_SB_FORMAT_1) {
9499 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9500 case TG3_EEPROM_SB_REVISION_0:
9501 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9502 break;
9503 case TG3_EEPROM_SB_REVISION_2:
9504 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9505 break;
9506 case TG3_EEPROM_SB_REVISION_3:
9507 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9508 break;
9509 default:
9510 return 0;
9511 }
9512 } else
Michael Chan1b277772006-03-20 22:27:48 -08009513 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009514 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9515 size = NVRAM_SELFBOOT_HW_SIZE;
9516 else
Michael Chan1b277772006-03-20 22:27:48 -08009517 return -EIO;
9518
9519 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009520 if (buf == NULL)
9521 return -ENOMEM;
9522
Michael Chan1b277772006-03-20 22:27:48 -08009523 err = -EIO;
9524 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009525 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009526 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009527 }
Michael Chan1b277772006-03-20 22:27:48 -08009528 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009529 goto out;
9530
Michael Chan1b277772006-03-20 22:27:48 -08009531 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009532 magic = swab32(le32_to_cpu(buf[0]));
9533 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009534 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009535 u8 *buf8 = (u8 *) buf, csum8 = 0;
9536
Al Virob9fc7dc2007-12-17 22:59:57 -08009537 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009538 TG3_EEPROM_SB_REVISION_2) {
9539 /* For rev 2, the csum doesn't include the MBA. */
9540 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9541 csum8 += buf8[i];
9542 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9543 csum8 += buf8[i];
9544 } else {
9545 for (i = 0; i < size; i++)
9546 csum8 += buf8[i];
9547 }
Michael Chan1b277772006-03-20 22:27:48 -08009548
Adrian Bunkad96b482006-04-05 22:21:04 -07009549 if (csum8 == 0) {
9550 err = 0;
9551 goto out;
9552 }
9553
9554 err = -EIO;
9555 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009556 }
Michael Chan566f86a2005-05-29 14:56:58 -07009557
Al Virob9fc7dc2007-12-17 22:59:57 -08009558 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009559 TG3_EEPROM_MAGIC_HW) {
9560 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9561 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9562 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009563
9564 /* Separate the parity bits and the data bytes. */
9565 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9566 if ((i == 0) || (i == 8)) {
9567 int l;
9568 u8 msk;
9569
9570 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9571 parity[k++] = buf8[i] & msk;
9572 i++;
9573 }
9574 else if (i == 16) {
9575 int l;
9576 u8 msk;
9577
9578 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9579 parity[k++] = buf8[i] & msk;
9580 i++;
9581
9582 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9583 parity[k++] = buf8[i] & msk;
9584 i++;
9585 }
9586 data[j++] = buf8[i];
9587 }
9588
9589 err = -EIO;
9590 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9591 u8 hw8 = hweight8(data[i]);
9592
9593 if ((hw8 & 0x1) && parity[i])
9594 goto out;
9595 else if (!(hw8 & 0x1) && !parity[i])
9596 goto out;
9597 }
9598 err = 0;
9599 goto out;
9600 }
9601
Michael Chan566f86a2005-05-29 14:56:58 -07009602 /* Bootstrap checksum at offset 0x10 */
9603 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009604 if(csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009605 goto out;
9606
9607 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9608 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009609 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009610 goto out;
9611
9612 err = 0;
9613
9614out:
9615 kfree(buf);
9616 return err;
9617}
9618
Michael Chanca430072005-05-29 14:57:23 -07009619#define TG3_SERDES_TIMEOUT_SEC 2
9620#define TG3_COPPER_TIMEOUT_SEC 6
9621
9622static int tg3_test_link(struct tg3 *tp)
9623{
9624 int i, max;
9625
9626 if (!netif_running(tp->dev))
9627 return -ENODEV;
9628
Michael Chan4c987482005-09-05 17:52:38 -07009629 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009630 max = TG3_SERDES_TIMEOUT_SEC;
9631 else
9632 max = TG3_COPPER_TIMEOUT_SEC;
9633
9634 for (i = 0; i < max; i++) {
9635 if (netif_carrier_ok(tp->dev))
9636 return 0;
9637
9638 if (msleep_interruptible(1000))
9639 break;
9640 }
9641
9642 return -EIO;
9643}
9644
Michael Chana71116d2005-05-29 14:58:11 -07009645/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009646static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009647{
Michael Chanb16250e2006-09-27 16:10:14 -07009648 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009649 u32 offset, read_mask, write_mask, val, save_val, read_val;
9650 static struct {
9651 u16 offset;
9652 u16 flags;
9653#define TG3_FL_5705 0x1
9654#define TG3_FL_NOT_5705 0x2
9655#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009656#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009657 u32 read_mask;
9658 u32 write_mask;
9659 } reg_tbl[] = {
9660 /* MAC Control Registers */
9661 { MAC_MODE, TG3_FL_NOT_5705,
9662 0x00000000, 0x00ef6f8c },
9663 { MAC_MODE, TG3_FL_5705,
9664 0x00000000, 0x01ef6b8c },
9665 { MAC_STATUS, TG3_FL_NOT_5705,
9666 0x03800107, 0x00000000 },
9667 { MAC_STATUS, TG3_FL_5705,
9668 0x03800100, 0x00000000 },
9669 { MAC_ADDR_0_HIGH, 0x0000,
9670 0x00000000, 0x0000ffff },
9671 { MAC_ADDR_0_LOW, 0x0000,
9672 0x00000000, 0xffffffff },
9673 { MAC_RX_MTU_SIZE, 0x0000,
9674 0x00000000, 0x0000ffff },
9675 { MAC_TX_MODE, 0x0000,
9676 0x00000000, 0x00000070 },
9677 { MAC_TX_LENGTHS, 0x0000,
9678 0x00000000, 0x00003fff },
9679 { MAC_RX_MODE, TG3_FL_NOT_5705,
9680 0x00000000, 0x000007fc },
9681 { MAC_RX_MODE, TG3_FL_5705,
9682 0x00000000, 0x000007dc },
9683 { MAC_HASH_REG_0, 0x0000,
9684 0x00000000, 0xffffffff },
9685 { MAC_HASH_REG_1, 0x0000,
9686 0x00000000, 0xffffffff },
9687 { MAC_HASH_REG_2, 0x0000,
9688 0x00000000, 0xffffffff },
9689 { MAC_HASH_REG_3, 0x0000,
9690 0x00000000, 0xffffffff },
9691
9692 /* Receive Data and Receive BD Initiator Control Registers. */
9693 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9694 0x00000000, 0xffffffff },
9695 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9696 0x00000000, 0xffffffff },
9697 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9698 0x00000000, 0x00000003 },
9699 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9700 0x00000000, 0xffffffff },
9701 { RCVDBDI_STD_BD+0, 0x0000,
9702 0x00000000, 0xffffffff },
9703 { RCVDBDI_STD_BD+4, 0x0000,
9704 0x00000000, 0xffffffff },
9705 { RCVDBDI_STD_BD+8, 0x0000,
9706 0x00000000, 0xffff0002 },
9707 { RCVDBDI_STD_BD+0xc, 0x0000,
9708 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009709
Michael Chana71116d2005-05-29 14:58:11 -07009710 /* Receive BD Initiator Control Registers. */
9711 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9712 0x00000000, 0xffffffff },
9713 { RCVBDI_STD_THRESH, TG3_FL_5705,
9714 0x00000000, 0x000003ff },
9715 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9716 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009717
Michael Chana71116d2005-05-29 14:58:11 -07009718 /* Host Coalescing Control Registers. */
9719 { HOSTCC_MODE, TG3_FL_NOT_5705,
9720 0x00000000, 0x00000004 },
9721 { HOSTCC_MODE, TG3_FL_5705,
9722 0x00000000, 0x000000f6 },
9723 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9724 0x00000000, 0xffffffff },
9725 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9726 0x00000000, 0x000003ff },
9727 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9728 0x00000000, 0xffffffff },
9729 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9730 0x00000000, 0x000003ff },
9731 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9732 0x00000000, 0xffffffff },
9733 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9734 0x00000000, 0x000000ff },
9735 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9736 0x00000000, 0xffffffff },
9737 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9738 0x00000000, 0x000000ff },
9739 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9740 0x00000000, 0xffffffff },
9741 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9742 0x00000000, 0xffffffff },
9743 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9744 0x00000000, 0xffffffff },
9745 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9746 0x00000000, 0x000000ff },
9747 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9748 0x00000000, 0xffffffff },
9749 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9750 0x00000000, 0x000000ff },
9751 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9752 0x00000000, 0xffffffff },
9753 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9754 0x00000000, 0xffffffff },
9755 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9756 0x00000000, 0xffffffff },
9757 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9758 0x00000000, 0xffffffff },
9759 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9760 0x00000000, 0xffffffff },
9761 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9762 0xffffffff, 0x00000000 },
9763 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9764 0xffffffff, 0x00000000 },
9765
9766 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009767 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009768 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009769 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009770 0x00000000, 0x007fffff },
9771 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9772 0x00000000, 0x0000003f },
9773 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9774 0x00000000, 0x000001ff },
9775 { BUFMGR_MB_HIGH_WATER, 0x0000,
9776 0x00000000, 0x000001ff },
9777 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9778 0xffffffff, 0x00000000 },
9779 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9780 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009781
Michael Chana71116d2005-05-29 14:58:11 -07009782 /* Mailbox Registers */
9783 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9784 0x00000000, 0x000001ff },
9785 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9786 0x00000000, 0x000001ff },
9787 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9788 0x00000000, 0x000007ff },
9789 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9790 0x00000000, 0x000001ff },
9791
9792 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9793 };
9794
Michael Chanb16250e2006-09-27 16:10:14 -07009795 is_5705 = is_5750 = 0;
9796 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009797 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009798 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9799 is_5750 = 1;
9800 }
Michael Chana71116d2005-05-29 14:58:11 -07009801
9802 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9803 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9804 continue;
9805
9806 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9807 continue;
9808
9809 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9810 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9811 continue;
9812
Michael Chanb16250e2006-09-27 16:10:14 -07009813 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9814 continue;
9815
Michael Chana71116d2005-05-29 14:58:11 -07009816 offset = (u32) reg_tbl[i].offset;
9817 read_mask = reg_tbl[i].read_mask;
9818 write_mask = reg_tbl[i].write_mask;
9819
9820 /* Save the original register content */
9821 save_val = tr32(offset);
9822
9823 /* Determine the read-only value. */
9824 read_val = save_val & read_mask;
9825
9826 /* Write zero to the register, then make sure the read-only bits
9827 * are not changed and the read/write bits are all zeros.
9828 */
9829 tw32(offset, 0);
9830
9831 val = tr32(offset);
9832
9833 /* Test the read-only and read/write bits. */
9834 if (((val & read_mask) != read_val) || (val & write_mask))
9835 goto out;
9836
9837 /* Write ones to all the bits defined by RdMask and WrMask, then
9838 * make sure the read-only bits are not changed and the
9839 * read/write bits are all ones.
9840 */
9841 tw32(offset, read_mask | write_mask);
9842
9843 val = tr32(offset);
9844
9845 /* Test the read-only bits. */
9846 if ((val & read_mask) != read_val)
9847 goto out;
9848
9849 /* Test the read/write bits. */
9850 if ((val & write_mask) != write_mask)
9851 goto out;
9852
9853 tw32(offset, save_val);
9854 }
9855
9856 return 0;
9857
9858out:
Michael Chan9f88f292006-12-07 00:22:54 -08009859 if (netif_msg_hw(tp))
9860 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9861 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009862 tw32(offset, save_val);
9863 return -EIO;
9864}
9865
Michael Chan7942e1d2005-05-29 14:58:36 -07009866static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9867{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009868 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009869 int i;
9870 u32 j;
9871
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009872 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009873 for (j = 0; j < len; j += 4) {
9874 u32 val;
9875
9876 tg3_write_mem(tp, offset + j, test_pattern[i]);
9877 tg3_read_mem(tp, offset + j, &val);
9878 if (val != test_pattern[i])
9879 return -EIO;
9880 }
9881 }
9882 return 0;
9883}
9884
9885static int tg3_test_memory(struct tg3 *tp)
9886{
9887 static struct mem_entry {
9888 u32 offset;
9889 u32 len;
9890 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009891 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009892 { 0x00002000, 0x1c000},
9893 { 0xffffffff, 0x00000}
9894 }, mem_tbl_5705[] = {
9895 { 0x00000100, 0x0000c},
9896 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009897 { 0x00004000, 0x00800},
9898 { 0x00006000, 0x01000},
9899 { 0x00008000, 0x02000},
9900 { 0x00010000, 0x0e000},
9901 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009902 }, mem_tbl_5755[] = {
9903 { 0x00000200, 0x00008},
9904 { 0x00004000, 0x00800},
9905 { 0x00006000, 0x00800},
9906 { 0x00008000, 0x02000},
9907 { 0x00010000, 0x0c000},
9908 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009909 }, mem_tbl_5906[] = {
9910 { 0x00000200, 0x00008},
9911 { 0x00004000, 0x00400},
9912 { 0x00006000, 0x00400},
9913 { 0x00008000, 0x01000},
9914 { 0x00010000, 0x01000},
9915 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009916 };
9917 struct mem_entry *mem_tbl;
9918 int err = 0;
9919 int i;
9920
Michael Chan79f4d132006-03-20 22:28:57 -08009921 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08009922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -08009927 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -07009928 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9929 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -08009930 else
9931 mem_tbl = mem_tbl_5705;
9932 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07009933 mem_tbl = mem_tbl_570x;
9934
9935 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9936 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9937 mem_tbl[i].len)) != 0)
9938 break;
9939 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009940
Michael Chan7942e1d2005-05-29 14:58:36 -07009941 return err;
9942}
9943
Michael Chan9f40dea2005-09-05 17:53:06 -07009944#define TG3_MAC_LOOPBACK 0
9945#define TG3_PHY_LOOPBACK 1
9946
9947static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07009948{
Michael Chan9f40dea2005-09-05 17:53:06 -07009949 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07009950 u32 desc_idx;
9951 struct sk_buff *skb, *rx_skb;
9952 u8 *tx_data;
9953 dma_addr_t map;
9954 int num_pkts, tx_len, rx_len, i, err;
9955 struct tg3_rx_buffer_desc *desc;
9956
Michael Chan9f40dea2005-09-05 17:53:06 -07009957 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07009958 /* HW errata - mac loopback fails in some cases on 5780.
9959 * Normal traffic and PHY loopback are not affected by
9960 * errata.
9961 */
9962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9963 return 0;
9964
Michael Chan9f40dea2005-09-05 17:53:06 -07009965 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009966 MAC_MODE_PORT_INT_LPBACK;
9967 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9968 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -07009969 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9970 mac_mode |= MAC_MODE_PORT_MODE_MII;
9971 else
9972 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -07009973 tw32(MAC_MODE, mac_mode);
9974 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -07009975 u32 val;
9976
Michael Chanb16250e2006-09-27 16:10:14 -07009977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9978 u32 phytest;
9979
9980 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9981 u32 phy;
9982
9983 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9984 phytest | MII_TG3_EPHY_SHADOW_EN);
9985 if (!tg3_readphy(tp, 0x1b, &phy))
9986 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -07009987 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9988 }
Michael Chan5d64ad32006-12-07 00:19:40 -08009989 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9990 } else
9991 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -07009992
Matt Carlson9ef8ca92007-07-11 19:48:29 -07009993 tg3_phy_toggle_automdix(tp, 0);
9994
Michael Chan3f7045c2006-09-27 16:02:29 -07009995 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -07009996 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -08009997
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009998 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -08009999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010000 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010001 mac_mode |= MAC_MODE_PORT_MODE_MII;
10002 } else
10003 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010004
Michael Chanc94e3942005-09-27 12:12:42 -070010005 /* reset to prevent losing 1st rx packet intermittently */
10006 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10007 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10008 udelay(10);
10009 tw32_f(MAC_RX_MODE, tp->rx_mode);
10010 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010011 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10012 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10013 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10014 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10015 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010016 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10017 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10018 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010019 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010020 }
10021 else
10022 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010023
10024 err = -EIO;
10025
Michael Chanc76949a2005-05-29 14:58:59 -070010026 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010027 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010028 if (!skb)
10029 return -ENOMEM;
10030
Michael Chanc76949a2005-05-29 14:58:59 -070010031 tx_data = skb_put(skb, tx_len);
10032 memcpy(tx_data, tp->dev->dev_addr, 6);
10033 memset(tx_data + 6, 0x0, 8);
10034
10035 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10036
10037 for (i = 14; i < tx_len; i++)
10038 tx_data[i] = (u8) (i & 0xff);
10039
10040 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10041
10042 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10043 HOSTCC_MODE_NOW);
10044
10045 udelay(10);
10046
10047 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10048
Michael Chanc76949a2005-05-29 14:58:59 -070010049 num_pkts = 0;
10050
Michael Chan9f40dea2005-09-05 17:53:06 -070010051 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010052
Michael Chan9f40dea2005-09-05 17:53:06 -070010053 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010054 num_pkts++;
10055
Michael Chan9f40dea2005-09-05 17:53:06 -070010056 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10057 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010058 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010059
10060 udelay(10);
10061
Michael Chan3f7045c2006-09-27 16:02:29 -070010062 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10063 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010064 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10065 HOSTCC_MODE_NOW);
10066
10067 udelay(10);
10068
10069 tx_idx = tp->hw_status->idx[0].tx_consumer;
10070 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010071 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010072 (rx_idx == (rx_start_idx + num_pkts)))
10073 break;
10074 }
10075
10076 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10077 dev_kfree_skb(skb);
10078
Michael Chan9f40dea2005-09-05 17:53:06 -070010079 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010080 goto out;
10081
10082 if (rx_idx != rx_start_idx + num_pkts)
10083 goto out;
10084
10085 desc = &tp->rx_rcb[rx_start_idx];
10086 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10087 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10088 if (opaque_key != RXD_OPAQUE_RING_STD)
10089 goto out;
10090
10091 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10092 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10093 goto out;
10094
10095 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10096 if (rx_len != tx_len)
10097 goto out;
10098
10099 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10100
10101 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10102 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10103
10104 for (i = 14; i < tx_len; i++) {
10105 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10106 goto out;
10107 }
10108 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010109
Michael Chanc76949a2005-05-29 14:58:59 -070010110 /* tg3_free_rings will unmap and free the rx_skb */
10111out:
10112 return err;
10113}
10114
Michael Chan9f40dea2005-09-05 17:53:06 -070010115#define TG3_MAC_LOOPBACK_FAILED 1
10116#define TG3_PHY_LOOPBACK_FAILED 2
10117#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10118 TG3_PHY_LOOPBACK_FAILED)
10119
10120static int tg3_test_loopback(struct tg3 *tp)
10121{
10122 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010123 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010124
10125 if (!netif_running(tp->dev))
10126 return TG3_LOOPBACK_FAILED;
10127
Michael Chanb9ec6c12006-07-25 16:37:27 -070010128 err = tg3_reset_hw(tp, 1);
10129 if (err)
10130 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010131
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010135 int i;
10136 u32 status;
10137
10138 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10139
10140 /* Wait for up to 40 microseconds to acquire lock. */
10141 for (i = 0; i < 4; i++) {
10142 status = tr32(TG3_CPMU_MUTEX_GNT);
10143 if (status == CPMU_MUTEX_GNT_DRIVER)
10144 break;
10145 udelay(10);
10146 }
10147
10148 if (status != CPMU_MUTEX_GNT_DRIVER)
10149 return TG3_LOOPBACK_FAILED;
10150
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010151 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010152 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010153 tw32(TG3_CPMU_CTRL,
10154 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10155 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010156 }
10157
Michael Chan9f40dea2005-09-05 17:53:06 -070010158 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10159 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010160
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010164 tw32(TG3_CPMU_CTRL, cpmuctrl);
10165
10166 /* Release the mutex */
10167 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10168 }
10169
Matt Carlsondd477002008-05-25 23:45:58 -070010170 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10171 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010172 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10173 err |= TG3_PHY_LOOPBACK_FAILED;
10174 }
10175
10176 return err;
10177}
10178
Michael Chan4cafd3f2005-05-29 14:56:34 -070010179static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10180 u64 *data)
10181{
Michael Chan566f86a2005-05-29 14:56:58 -070010182 struct tg3 *tp = netdev_priv(dev);
10183
Michael Chanbc1c7562006-03-20 17:48:03 -080010184 if (tp->link_config.phy_is_low_power)
10185 tg3_set_power_state(tp, PCI_D0);
10186
Michael Chan566f86a2005-05-29 14:56:58 -070010187 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10188
10189 if (tg3_test_nvram(tp) != 0) {
10190 etest->flags |= ETH_TEST_FL_FAILED;
10191 data[0] = 1;
10192 }
Michael Chanca430072005-05-29 14:57:23 -070010193 if (tg3_test_link(tp) != 0) {
10194 etest->flags |= ETH_TEST_FL_FAILED;
10195 data[1] = 1;
10196 }
Michael Chana71116d2005-05-29 14:58:11 -070010197 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010198 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010199
Michael Chanbbe832c2005-06-24 20:20:04 -070010200 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010201 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010202 tg3_netif_stop(tp);
10203 irq_sync = 1;
10204 }
10205
10206 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010207
10208 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010209 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010210 tg3_halt_cpu(tp, RX_CPU_BASE);
10211 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10212 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010213 if (!err)
10214 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010215
Michael Chand9ab5ad2006-03-20 22:27:35 -080010216 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10217 tg3_phy_reset(tp);
10218
Michael Chana71116d2005-05-29 14:58:11 -070010219 if (tg3_test_registers(tp) != 0) {
10220 etest->flags |= ETH_TEST_FL_FAILED;
10221 data[2] = 1;
10222 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010223 if (tg3_test_memory(tp) != 0) {
10224 etest->flags |= ETH_TEST_FL_FAILED;
10225 data[3] = 1;
10226 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010227 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010228 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010229
David S. Millerf47c11e2005-06-24 20:18:35 -070010230 tg3_full_unlock(tp);
10231
Michael Chand4bc3922005-05-29 14:59:20 -070010232 if (tg3_test_interrupt(tp) != 0) {
10233 etest->flags |= ETH_TEST_FL_FAILED;
10234 data[5] = 1;
10235 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010236
10237 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010238
Michael Chana71116d2005-05-29 14:58:11 -070010239 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10240 if (netif_running(dev)) {
10241 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010242 err2 = tg3_restart_hw(tp, 1);
10243 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010244 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010245 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010246
10247 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010248
10249 if (irq_sync && !err2)
10250 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010251 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010252 if (tp->link_config.phy_is_low_power)
10253 tg3_set_power_state(tp, PCI_D3hot);
10254
Michael Chan4cafd3f2005-05-29 14:56:34 -070010255}
10256
Linus Torvalds1da177e2005-04-16 15:20:36 -070010257static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10258{
10259 struct mii_ioctl_data *data = if_mii(ifr);
10260 struct tg3 *tp = netdev_priv(dev);
10261 int err;
10262
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010263 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10264 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10265 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010266 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010267 }
10268
Linus Torvalds1da177e2005-04-16 15:20:36 -070010269 switch(cmd) {
10270 case SIOCGMIIPHY:
10271 data->phy_id = PHY_ADDR;
10272
10273 /* fallthru */
10274 case SIOCGMIIREG: {
10275 u32 mii_regval;
10276
10277 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10278 break; /* We have no PHY */
10279
Michael Chanbc1c7562006-03-20 17:48:03 -080010280 if (tp->link_config.phy_is_low_power)
10281 return -EAGAIN;
10282
David S. Millerf47c11e2005-06-24 20:18:35 -070010283 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010284 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010285 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010286
10287 data->val_out = mii_regval;
10288
10289 return err;
10290 }
10291
10292 case SIOCSMIIREG:
10293 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10294 break; /* We have no PHY */
10295
10296 if (!capable(CAP_NET_ADMIN))
10297 return -EPERM;
10298
Michael Chanbc1c7562006-03-20 17:48:03 -080010299 if (tp->link_config.phy_is_low_power)
10300 return -EAGAIN;
10301
David S. Millerf47c11e2005-06-24 20:18:35 -070010302 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010303 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010304 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010305
10306 return err;
10307
10308 default:
10309 /* do nothing */
10310 break;
10311 }
10312 return -EOPNOTSUPP;
10313}
10314
10315#if TG3_VLAN_TAG_USED
10316static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10317{
10318 struct tg3 *tp = netdev_priv(dev);
10319
Michael Chan29315e82006-06-29 20:12:30 -070010320 if (netif_running(dev))
10321 tg3_netif_stop(tp);
10322
David S. Millerf47c11e2005-06-24 20:18:35 -070010323 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010324
10325 tp->vlgrp = grp;
10326
10327 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10328 __tg3_set_rx_mode(dev);
10329
Michael Chan29315e82006-06-29 20:12:30 -070010330 if (netif_running(dev))
10331 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010332
10333 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010334}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010335#endif
10336
David S. Miller15f98502005-05-18 22:49:26 -070010337static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10338{
10339 struct tg3 *tp = netdev_priv(dev);
10340
10341 memcpy(ec, &tp->coal, sizeof(*ec));
10342 return 0;
10343}
10344
Michael Chand244c892005-07-05 14:42:33 -070010345static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10346{
10347 struct tg3 *tp = netdev_priv(dev);
10348 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10349 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10350
10351 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10352 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10353 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10354 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10355 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10356 }
10357
10358 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10359 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10360 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10361 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10362 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10363 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10364 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10365 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10366 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10367 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10368 return -EINVAL;
10369
10370 /* No rx interrupts will be generated if both are zero */
10371 if ((ec->rx_coalesce_usecs == 0) &&
10372 (ec->rx_max_coalesced_frames == 0))
10373 return -EINVAL;
10374
10375 /* No tx interrupts will be generated if both are zero */
10376 if ((ec->tx_coalesce_usecs == 0) &&
10377 (ec->tx_max_coalesced_frames == 0))
10378 return -EINVAL;
10379
10380 /* Only copy relevant parameters, ignore all others. */
10381 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10382 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10383 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10384 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10385 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10386 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10387 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10388 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10389 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10390
10391 if (netif_running(dev)) {
10392 tg3_full_lock(tp, 0);
10393 __tg3_set_coalesce(tp, &tp->coal);
10394 tg3_full_unlock(tp);
10395 }
10396 return 0;
10397}
10398
Jeff Garzik7282d492006-09-13 14:30:00 -040010399static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010400 .get_settings = tg3_get_settings,
10401 .set_settings = tg3_set_settings,
10402 .get_drvinfo = tg3_get_drvinfo,
10403 .get_regs_len = tg3_get_regs_len,
10404 .get_regs = tg3_get_regs,
10405 .get_wol = tg3_get_wol,
10406 .set_wol = tg3_set_wol,
10407 .get_msglevel = tg3_get_msglevel,
10408 .set_msglevel = tg3_set_msglevel,
10409 .nway_reset = tg3_nway_reset,
10410 .get_link = ethtool_op_get_link,
10411 .get_eeprom_len = tg3_get_eeprom_len,
10412 .get_eeprom = tg3_get_eeprom,
10413 .set_eeprom = tg3_set_eeprom,
10414 .get_ringparam = tg3_get_ringparam,
10415 .set_ringparam = tg3_set_ringparam,
10416 .get_pauseparam = tg3_get_pauseparam,
10417 .set_pauseparam = tg3_set_pauseparam,
10418 .get_rx_csum = tg3_get_rx_csum,
10419 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010420 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010421 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010422 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010423 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010424 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010425 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010426 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010427 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010428 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010429 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010430};
10431
10432static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10433{
Michael Chan1b277772006-03-20 22:27:48 -080010434 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010435
10436 tp->nvram_size = EEPROM_CHIP_SIZE;
10437
Michael Chan18201802006-03-20 22:29:15 -080010438 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010439 return;
10440
Michael Chanb16250e2006-09-27 16:10:14 -070010441 if ((magic != TG3_EEPROM_MAGIC) &&
10442 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10443 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010444 return;
10445
10446 /*
10447 * Size the chip by reading offsets at increasing powers of two.
10448 * When we encounter our validation signature, we know the addressing
10449 * has wrapped around, and thus have our chip size.
10450 */
Michael Chan1b277772006-03-20 22:27:48 -080010451 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010452
10453 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010454 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010455 return;
10456
Michael Chan18201802006-03-20 22:29:15 -080010457 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010458 break;
10459
10460 cursize <<= 1;
10461 }
10462
10463 tp->nvram_size = cursize;
10464}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010465
Linus Torvalds1da177e2005-04-16 15:20:36 -070010466static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10467{
10468 u32 val;
10469
Michael Chan18201802006-03-20 22:29:15 -080010470 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010471 return;
10472
10473 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010474 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010475 tg3_get_eeprom_size(tp);
10476 return;
10477 }
10478
Linus Torvalds1da177e2005-04-16 15:20:36 -070010479 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10480 if (val != 0) {
10481 tp->nvram_size = (val >> 16) * 1024;
10482 return;
10483 }
10484 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010485 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010486}
10487
10488static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10489{
10490 u32 nvcfg1;
10491
10492 nvcfg1 = tr32(NVRAM_CFG1);
10493 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10494 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10495 }
10496 else {
10497 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10498 tw32(NVRAM_CFG1, nvcfg1);
10499 }
10500
Michael Chan4c987482005-09-05 17:52:38 -070010501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010502 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010503 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10504 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10505 tp->nvram_jedecnum = JEDEC_ATMEL;
10506 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10507 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10508 break;
10509 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10510 tp->nvram_jedecnum = JEDEC_ATMEL;
10511 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10512 break;
10513 case FLASH_VENDOR_ATMEL_EEPROM:
10514 tp->nvram_jedecnum = JEDEC_ATMEL;
10515 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10516 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10517 break;
10518 case FLASH_VENDOR_ST:
10519 tp->nvram_jedecnum = JEDEC_ST;
10520 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10521 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10522 break;
10523 case FLASH_VENDOR_SAIFUN:
10524 tp->nvram_jedecnum = JEDEC_SAIFUN;
10525 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10526 break;
10527 case FLASH_VENDOR_SST_SMALL:
10528 case FLASH_VENDOR_SST_LARGE:
10529 tp->nvram_jedecnum = JEDEC_SST;
10530 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10531 break;
10532 }
10533 }
10534 else {
10535 tp->nvram_jedecnum = JEDEC_ATMEL;
10536 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10537 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10538 }
10539}
10540
Michael Chan361b4ac2005-04-21 17:11:21 -070010541static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10542{
10543 u32 nvcfg1;
10544
10545 nvcfg1 = tr32(NVRAM_CFG1);
10546
Michael Chane6af3012005-04-21 17:12:05 -070010547 /* NVRAM protection for TPM */
10548 if (nvcfg1 & (1 << 27))
10549 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10550
Michael Chan361b4ac2005-04-21 17:11:21 -070010551 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10552 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10553 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10554 tp->nvram_jedecnum = JEDEC_ATMEL;
10555 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10556 break;
10557 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10558 tp->nvram_jedecnum = JEDEC_ATMEL;
10559 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10560 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10561 break;
10562 case FLASH_5752VENDOR_ST_M45PE10:
10563 case FLASH_5752VENDOR_ST_M45PE20:
10564 case FLASH_5752VENDOR_ST_M45PE40:
10565 tp->nvram_jedecnum = JEDEC_ST;
10566 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10567 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10568 break;
10569 }
10570
10571 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10572 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10573 case FLASH_5752PAGE_SIZE_256:
10574 tp->nvram_pagesize = 256;
10575 break;
10576 case FLASH_5752PAGE_SIZE_512:
10577 tp->nvram_pagesize = 512;
10578 break;
10579 case FLASH_5752PAGE_SIZE_1K:
10580 tp->nvram_pagesize = 1024;
10581 break;
10582 case FLASH_5752PAGE_SIZE_2K:
10583 tp->nvram_pagesize = 2048;
10584 break;
10585 case FLASH_5752PAGE_SIZE_4K:
10586 tp->nvram_pagesize = 4096;
10587 break;
10588 case FLASH_5752PAGE_SIZE_264:
10589 tp->nvram_pagesize = 264;
10590 break;
10591 }
10592 }
10593 else {
10594 /* For eeprom, set pagesize to maximum eeprom size */
10595 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10596
10597 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10598 tw32(NVRAM_CFG1, nvcfg1);
10599 }
10600}
10601
Michael Chand3c7b882006-03-23 01:28:25 -080010602static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10603{
Matt Carlson989a9d22007-05-05 11:51:05 -070010604 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010605
10606 nvcfg1 = tr32(NVRAM_CFG1);
10607
10608 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010609 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010610 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010611 protect = 1;
10612 }
Michael Chand3c7b882006-03-23 01:28:25 -080010613
Matt Carlson989a9d22007-05-05 11:51:05 -070010614 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10615 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010616 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10617 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10618 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010619 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010620 tp->nvram_jedecnum = JEDEC_ATMEL;
10621 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10622 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10623 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010624 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10625 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010626 tp->nvram_size = (protect ? 0x3e200 :
10627 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010628 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010629 tp->nvram_size = (protect ? 0x1f200 :
10630 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010631 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010632 tp->nvram_size = (protect ? 0x1f200 :
10633 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010634 break;
10635 case FLASH_5752VENDOR_ST_M45PE10:
10636 case FLASH_5752VENDOR_ST_M45PE20:
10637 case FLASH_5752VENDOR_ST_M45PE40:
10638 tp->nvram_jedecnum = JEDEC_ST;
10639 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10640 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10641 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010642 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010643 tp->nvram_size = (protect ?
10644 TG3_NVRAM_SIZE_64KB :
10645 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010646 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010647 tp->nvram_size = (protect ?
10648 TG3_NVRAM_SIZE_64KB :
10649 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010650 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010651 tp->nvram_size = (protect ?
10652 TG3_NVRAM_SIZE_128KB :
10653 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010654 break;
10655 }
10656}
10657
Michael Chan1b277772006-03-20 22:27:48 -080010658static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10659{
10660 u32 nvcfg1;
10661
10662 nvcfg1 = tr32(NVRAM_CFG1);
10663
10664 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10665 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10666 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10667 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10668 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10669 tp->nvram_jedecnum = JEDEC_ATMEL;
10670 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10671 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10672
10673 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10674 tw32(NVRAM_CFG1, nvcfg1);
10675 break;
10676 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10677 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10678 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10679 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10680 tp->nvram_jedecnum = JEDEC_ATMEL;
10681 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10682 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10683 tp->nvram_pagesize = 264;
10684 break;
10685 case FLASH_5752VENDOR_ST_M45PE10:
10686 case FLASH_5752VENDOR_ST_M45PE20:
10687 case FLASH_5752VENDOR_ST_M45PE40:
10688 tp->nvram_jedecnum = JEDEC_ST;
10689 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10690 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10691 tp->nvram_pagesize = 256;
10692 break;
10693 }
10694}
10695
Matt Carlson6b91fa02007-10-10 18:01:09 -070010696static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10697{
10698 u32 nvcfg1, protect = 0;
10699
10700 nvcfg1 = tr32(NVRAM_CFG1);
10701
10702 /* NVRAM protection for TPM */
10703 if (nvcfg1 & (1 << 27)) {
10704 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10705 protect = 1;
10706 }
10707
10708 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10709 switch (nvcfg1) {
10710 case FLASH_5761VENDOR_ATMEL_ADB021D:
10711 case FLASH_5761VENDOR_ATMEL_ADB041D:
10712 case FLASH_5761VENDOR_ATMEL_ADB081D:
10713 case FLASH_5761VENDOR_ATMEL_ADB161D:
10714 case FLASH_5761VENDOR_ATMEL_MDB021D:
10715 case FLASH_5761VENDOR_ATMEL_MDB041D:
10716 case FLASH_5761VENDOR_ATMEL_MDB081D:
10717 case FLASH_5761VENDOR_ATMEL_MDB161D:
10718 tp->nvram_jedecnum = JEDEC_ATMEL;
10719 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10720 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10721 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10722 tp->nvram_pagesize = 256;
10723 break;
10724 case FLASH_5761VENDOR_ST_A_M45PE20:
10725 case FLASH_5761VENDOR_ST_A_M45PE40:
10726 case FLASH_5761VENDOR_ST_A_M45PE80:
10727 case FLASH_5761VENDOR_ST_A_M45PE16:
10728 case FLASH_5761VENDOR_ST_M_M45PE20:
10729 case FLASH_5761VENDOR_ST_M_M45PE40:
10730 case FLASH_5761VENDOR_ST_M_M45PE80:
10731 case FLASH_5761VENDOR_ST_M_M45PE16:
10732 tp->nvram_jedecnum = JEDEC_ST;
10733 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10734 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10735 tp->nvram_pagesize = 256;
10736 break;
10737 }
10738
10739 if (protect) {
10740 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10741 } else {
10742 switch (nvcfg1) {
10743 case FLASH_5761VENDOR_ATMEL_ADB161D:
10744 case FLASH_5761VENDOR_ATMEL_MDB161D:
10745 case FLASH_5761VENDOR_ST_A_M45PE16:
10746 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010747 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010748 break;
10749 case FLASH_5761VENDOR_ATMEL_ADB081D:
10750 case FLASH_5761VENDOR_ATMEL_MDB081D:
10751 case FLASH_5761VENDOR_ST_A_M45PE80:
10752 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010753 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010754 break;
10755 case FLASH_5761VENDOR_ATMEL_ADB041D:
10756 case FLASH_5761VENDOR_ATMEL_MDB041D:
10757 case FLASH_5761VENDOR_ST_A_M45PE40:
10758 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010759 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010760 break;
10761 case FLASH_5761VENDOR_ATMEL_ADB021D:
10762 case FLASH_5761VENDOR_ATMEL_MDB021D:
10763 case FLASH_5761VENDOR_ST_A_M45PE20:
10764 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010765 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010766 break;
10767 }
10768 }
10769}
10770
Michael Chanb5d37722006-09-27 16:06:21 -070010771static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10772{
10773 tp->nvram_jedecnum = JEDEC_ATMEL;
10774 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10775 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10776}
10777
Linus Torvalds1da177e2005-04-16 15:20:36 -070010778/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10779static void __devinit tg3_nvram_init(struct tg3 *tp)
10780{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010781 tw32_f(GRC_EEPROM_ADDR,
10782 (EEPROM_ADDR_FSM_RESET |
10783 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10784 EEPROM_ADDR_CLKPERD_SHIFT)));
10785
Michael Chan9d57f012006-12-07 00:23:25 -080010786 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010787
10788 /* Enable seeprom accesses. */
10789 tw32_f(GRC_LOCAL_CTRL,
10790 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10791 udelay(100);
10792
10793 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10794 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10795 tp->tg3_flags |= TG3_FLAG_NVRAM;
10796
Michael Chanec41c7d2006-01-17 02:40:55 -080010797 if (tg3_nvram_lock(tp)) {
10798 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10799 "tg3_nvram_init failed.\n", tp->dev->name);
10800 return;
10801 }
Michael Chane6af3012005-04-21 17:12:05 -070010802 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010803
Matt Carlson989a9d22007-05-05 11:51:05 -070010804 tp->nvram_size = 0;
10805
Michael Chan361b4ac2005-04-21 17:11:21 -070010806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10807 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010808 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10809 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010810 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010813 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010814 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10815 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010816 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10817 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010818 else
10819 tg3_get_nvram_info(tp);
10820
Matt Carlson989a9d22007-05-05 11:51:05 -070010821 if (tp->nvram_size == 0)
10822 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010823
Michael Chane6af3012005-04-21 17:12:05 -070010824 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010825 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010826
10827 } else {
10828 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10829
10830 tg3_get_eeprom_size(tp);
10831 }
10832}
10833
10834static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10835 u32 offset, u32 *val)
10836{
10837 u32 tmp;
10838 int i;
10839
10840 if (offset > EEPROM_ADDR_ADDR_MASK ||
10841 (offset % 4) != 0)
10842 return -EINVAL;
10843
10844 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10845 EEPROM_ADDR_DEVID_MASK |
10846 EEPROM_ADDR_READ);
10847 tw32(GRC_EEPROM_ADDR,
10848 tmp |
10849 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10850 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10851 EEPROM_ADDR_ADDR_MASK) |
10852 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10853
Michael Chan9d57f012006-12-07 00:23:25 -080010854 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010855 tmp = tr32(GRC_EEPROM_ADDR);
10856
10857 if (tmp & EEPROM_ADDR_COMPLETE)
10858 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010859 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010860 }
10861 if (!(tmp & EEPROM_ADDR_COMPLETE))
10862 return -EBUSY;
10863
10864 *val = tr32(GRC_EEPROM_DATA);
10865 return 0;
10866}
10867
10868#define NVRAM_CMD_TIMEOUT 10000
10869
10870static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10871{
10872 int i;
10873
10874 tw32(NVRAM_CMD, nvram_cmd);
10875 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10876 udelay(10);
10877 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10878 udelay(10);
10879 break;
10880 }
10881 }
10882 if (i == NVRAM_CMD_TIMEOUT) {
10883 return -EBUSY;
10884 }
10885 return 0;
10886}
10887
Michael Chan18201802006-03-20 22:29:15 -080010888static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10889{
10890 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10891 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10892 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010893 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010894 (tp->nvram_jedecnum == JEDEC_ATMEL))
10895
10896 addr = ((addr / tp->nvram_pagesize) <<
10897 ATMEL_AT45DB0X1B_PAGE_POS) +
10898 (addr % tp->nvram_pagesize);
10899
10900 return addr;
10901}
10902
Michael Chanc4e65752006-03-20 22:29:32 -080010903static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10904{
10905 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10906 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10907 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010908 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010909 (tp->nvram_jedecnum == JEDEC_ATMEL))
10910
10911 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10912 tp->nvram_pagesize) +
10913 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10914
10915 return addr;
10916}
10917
Linus Torvalds1da177e2005-04-16 15:20:36 -070010918static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10919{
10920 int ret;
10921
Linus Torvalds1da177e2005-04-16 15:20:36 -070010922 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10923 return tg3_nvram_read_using_eeprom(tp, offset, val);
10924
Michael Chan18201802006-03-20 22:29:15 -080010925 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010926
10927 if (offset > NVRAM_ADDR_MSK)
10928 return -EINVAL;
10929
Michael Chanec41c7d2006-01-17 02:40:55 -080010930 ret = tg3_nvram_lock(tp);
10931 if (ret)
10932 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010933
Michael Chane6af3012005-04-21 17:12:05 -070010934 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010935
10936 tw32(NVRAM_ADDR, offset);
10937 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10938 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10939
10940 if (ret == 0)
10941 *val = swab32(tr32(NVRAM_RDDATA));
10942
Michael Chane6af3012005-04-21 17:12:05 -070010943 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010944
Michael Chan381291b2005-12-13 21:08:21 -080010945 tg3_nvram_unlock(tp);
10946
Linus Torvalds1da177e2005-04-16 15:20:36 -070010947 return ret;
10948}
10949
Al Virob9fc7dc2007-12-17 22:59:57 -080010950static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10951{
10952 u32 v;
10953 int res = tg3_nvram_read(tp, offset, &v);
10954 if (!res)
10955 *val = cpu_to_le32(v);
10956 return res;
10957}
10958
Michael Chan18201802006-03-20 22:29:15 -080010959static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10960{
10961 int err;
10962 u32 tmp;
10963
10964 err = tg3_nvram_read(tp, offset, &tmp);
10965 *val = swab32(tmp);
10966 return err;
10967}
10968
Linus Torvalds1da177e2005-04-16 15:20:36 -070010969static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10970 u32 offset, u32 len, u8 *buf)
10971{
10972 int i, j, rc = 0;
10973 u32 val;
10974
10975 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010976 u32 addr;
10977 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010978
10979 addr = offset + i;
10980
10981 memcpy(&data, buf + i, 4);
10982
Al Virob9fc7dc2007-12-17 22:59:57 -080010983 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010984
10985 val = tr32(GRC_EEPROM_ADDR);
10986 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10987
10988 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10989 EEPROM_ADDR_READ);
10990 tw32(GRC_EEPROM_ADDR, val |
10991 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10992 (addr & EEPROM_ADDR_ADDR_MASK) |
10993 EEPROM_ADDR_START |
10994 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010995
Michael Chan9d57f012006-12-07 00:23:25 -080010996 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010997 val = tr32(GRC_EEPROM_ADDR);
10998
10999 if (val & EEPROM_ADDR_COMPLETE)
11000 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011001 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011002 }
11003 if (!(val & EEPROM_ADDR_COMPLETE)) {
11004 rc = -EBUSY;
11005 break;
11006 }
11007 }
11008
11009 return rc;
11010}
11011
11012/* offset and length are dword aligned */
11013static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11014 u8 *buf)
11015{
11016 int ret = 0;
11017 u32 pagesize = tp->nvram_pagesize;
11018 u32 pagemask = pagesize - 1;
11019 u32 nvram_cmd;
11020 u8 *tmp;
11021
11022 tmp = kmalloc(pagesize, GFP_KERNEL);
11023 if (tmp == NULL)
11024 return -ENOMEM;
11025
11026 while (len) {
11027 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011028 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011029
11030 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011031
Linus Torvalds1da177e2005-04-16 15:20:36 -070011032 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011033 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011034 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011035 break;
11036 }
11037 if (ret)
11038 break;
11039
11040 page_off = offset & pagemask;
11041 size = pagesize;
11042 if (len < size)
11043 size = len;
11044
11045 len -= size;
11046
11047 memcpy(tmp + page_off, buf, size);
11048
11049 offset = offset + (pagesize - page_off);
11050
Michael Chane6af3012005-04-21 17:12:05 -070011051 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011052
11053 /*
11054 * Before we can erase the flash page, we need
11055 * to issue a special "write enable" command.
11056 */
11057 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11058
11059 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11060 break;
11061
11062 /* Erase the target page */
11063 tw32(NVRAM_ADDR, phy_addr);
11064
11065 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11066 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11067
11068 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11069 break;
11070
11071 /* Issue another write enable to start the write. */
11072 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11073
11074 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11075 break;
11076
11077 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011078 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011079
Al Virob9fc7dc2007-12-17 22:59:57 -080011080 data = *((__be32 *) (tmp + j));
11081 /* swab32(le32_to_cpu(data)), actually */
11082 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011083
11084 tw32(NVRAM_ADDR, phy_addr + j);
11085
11086 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11087 NVRAM_CMD_WR;
11088
11089 if (j == 0)
11090 nvram_cmd |= NVRAM_CMD_FIRST;
11091 else if (j == (pagesize - 4))
11092 nvram_cmd |= NVRAM_CMD_LAST;
11093
11094 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11095 break;
11096 }
11097 if (ret)
11098 break;
11099 }
11100
11101 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11102 tg3_nvram_exec_cmd(tp, nvram_cmd);
11103
11104 kfree(tmp);
11105
11106 return ret;
11107}
11108
11109/* offset and length are dword aligned */
11110static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11111 u8 *buf)
11112{
11113 int i, ret = 0;
11114
11115 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011116 u32 page_off, phy_addr, nvram_cmd;
11117 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011118
11119 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011120 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011121
11122 page_off = offset % tp->nvram_pagesize;
11123
Michael Chan18201802006-03-20 22:29:15 -080011124 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011125
11126 tw32(NVRAM_ADDR, phy_addr);
11127
11128 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11129
11130 if ((page_off == 0) || (i == 0))
11131 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011132 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011133 nvram_cmd |= NVRAM_CMD_LAST;
11134
11135 if (i == (len - 4))
11136 nvram_cmd |= NVRAM_CMD_LAST;
11137
Michael Chan4c987482005-09-05 17:52:38 -070011138 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011139 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011140 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011141 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011142 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011143 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011144 (tp->nvram_jedecnum == JEDEC_ST) &&
11145 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011146
11147 if ((ret = tg3_nvram_exec_cmd(tp,
11148 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11149 NVRAM_CMD_DONE)))
11150
11151 break;
11152 }
11153 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11154 /* We always do complete word writes to eeprom. */
11155 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11156 }
11157
11158 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11159 break;
11160 }
11161 return ret;
11162}
11163
11164/* offset and length are dword aligned */
11165static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11166{
11167 int ret;
11168
Linus Torvalds1da177e2005-04-16 15:20:36 -070011169 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011170 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11171 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011172 udelay(40);
11173 }
11174
11175 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11176 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11177 }
11178 else {
11179 u32 grc_mode;
11180
Michael Chanec41c7d2006-01-17 02:40:55 -080011181 ret = tg3_nvram_lock(tp);
11182 if (ret)
11183 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011184
Michael Chane6af3012005-04-21 17:12:05 -070011185 tg3_enable_nvram_access(tp);
11186 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11187 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011188 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011189
11190 grc_mode = tr32(GRC_MODE);
11191 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11192
11193 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11194 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11195
11196 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11197 buf);
11198 }
11199 else {
11200 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11201 buf);
11202 }
11203
11204 grc_mode = tr32(GRC_MODE);
11205 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11206
Michael Chane6af3012005-04-21 17:12:05 -070011207 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011208 tg3_nvram_unlock(tp);
11209 }
11210
11211 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011212 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011213 udelay(40);
11214 }
11215
11216 return ret;
11217}
11218
11219struct subsys_tbl_ent {
11220 u16 subsys_vendor, subsys_devid;
11221 u32 phy_id;
11222};
11223
11224static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11225 /* Broadcom boards. */
11226 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11227 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11228 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11229 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11230 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11231 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11232 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11233 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11234 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11235 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11236 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11237
11238 /* 3com boards. */
11239 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11240 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11241 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11242 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11243 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11244
11245 /* DELL boards. */
11246 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11247 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11248 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11249 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11250
11251 /* Compaq boards. */
11252 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11253 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11254 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11255 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11256 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11257
11258 /* IBM boards. */
11259 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11260};
11261
11262static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11263{
11264 int i;
11265
11266 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11267 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11268 tp->pdev->subsystem_vendor) &&
11269 (subsys_id_to_phy_id[i].subsys_devid ==
11270 tp->pdev->subsystem_device))
11271 return &subsys_id_to_phy_id[i];
11272 }
11273 return NULL;
11274}
11275
Michael Chan7d0c41e2005-04-21 17:06:20 -070011276static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011277{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011278 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011279 u16 pmcsr;
11280
11281 /* On some early chips the SRAM cannot be accessed in D3hot state,
11282 * so need make sure we're in D0.
11283 */
11284 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11285 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11286 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11287 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011288
11289 /* Make sure register accesses (indirect or otherwise)
11290 * will function correctly.
11291 */
11292 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11293 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011294
David S. Millerf49639e2006-06-09 11:58:36 -070011295 /* The memory arbiter has to be enabled in order for SRAM accesses
11296 * to succeed. Normally on powerup the tg3 chip firmware will make
11297 * sure it is enabled, but other entities such as system netboot
11298 * code might disable it.
11299 */
11300 val = tr32(MEMARB_MODE);
11301 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11302
Linus Torvalds1da177e2005-04-16 15:20:36 -070011303 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011304 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11305
Gary Zambranoa85feb82007-05-05 11:52:19 -070011306 /* Assume an onboard device and WOL capable by default. */
11307 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011308
Michael Chanb5d37722006-09-27 16:06:21 -070011309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011310 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011311 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011312 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11313 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011314 val = tr32(VCPU_CFGSHDW);
11315 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011316 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011317 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011318 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11319 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011320 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011321 goto done;
Michael Chanb5d37722006-09-27 16:06:21 -070011322 }
11323
Linus Torvalds1da177e2005-04-16 15:20:36 -070011324 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11325 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11326 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011327 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011328 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011329
11330 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11331 tp->nic_sram_data_cfg = nic_cfg;
11332
11333 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11334 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11335 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11336 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11337 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11338 (ver > 0) && (ver < 0x100))
11339 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11340
Matt Carlsona9daf362008-05-25 23:49:44 -070011341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11342 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11343
Linus Torvalds1da177e2005-04-16 15:20:36 -070011344 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11345 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11346 eeprom_phy_serdes = 1;
11347
11348 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11349 if (nic_phy_id != 0) {
11350 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11351 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11352
11353 eeprom_phy_id = (id1 >> 16) << 10;
11354 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11355 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11356 } else
11357 eeprom_phy_id = 0;
11358
Michael Chan7d0c41e2005-04-21 17:06:20 -070011359 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011360 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011361 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011362 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11363 else
11364 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11365 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011366
John W. Linvillecbf46852005-04-21 17:01:29 -070011367 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011368 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11369 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011370 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011371 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11372
11373 switch (led_cfg) {
11374 default:
11375 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11376 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11377 break;
11378
11379 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11380 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11381 break;
11382
11383 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11384 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011385
11386 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11387 * read on some older 5700/5701 bootcode.
11388 */
11389 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11390 ASIC_REV_5700 ||
11391 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11392 ASIC_REV_5701)
11393 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11394
Linus Torvalds1da177e2005-04-16 15:20:36 -070011395 break;
11396
11397 case SHASTA_EXT_LED_SHARED:
11398 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11399 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11400 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11401 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11402 LED_CTRL_MODE_PHY_2);
11403 break;
11404
11405 case SHASTA_EXT_LED_MAC:
11406 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11407 break;
11408
11409 case SHASTA_EXT_LED_COMBO:
11410 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11411 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11412 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11413 LED_CTRL_MODE_PHY_2);
11414 break;
11415
Stephen Hemminger855e1112008-04-16 16:37:28 -070011416 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011417
11418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11419 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11420 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11421 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11422
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011423 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11424 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011425
Michael Chan9d26e212006-12-07 00:21:14 -080011426 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011427 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011428 if ((tp->pdev->subsystem_vendor ==
11429 PCI_VENDOR_ID_ARIMA) &&
11430 (tp->pdev->subsystem_device == 0x205a ||
11431 tp->pdev->subsystem_device == 0x2063))
11432 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11433 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011434 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011435 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011437
11438 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11439 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011440 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011441 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11442 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011443
11444 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11445 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011446 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011447
Gary Zambranoa85feb82007-05-05 11:52:19 -070011448 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11449 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11450 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011451
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011452 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011453 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
Matt Carlson0527ba32007-10-10 18:03:30 -070011454 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11455
Linus Torvalds1da177e2005-04-16 15:20:36 -070011456 if (cfg2 & (1 << 17))
11457 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11458
11459 /* serdes signal pre-emphasis in register 0x590 set by */
11460 /* bootcode if bit 18 is set */
11461 if (cfg2 & (1 << 18))
11462 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011463
11464 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11465 u32 cfg3;
11466
11467 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11468 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11469 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11470 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011471
11472 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11473 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11474 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11475 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11476 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11477 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011478 }
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011479done:
11480 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11481 device_set_wakeup_enable(&tp->pdev->dev,
11482 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011483}
11484
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011485static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11486{
11487 int i;
11488 u32 val;
11489
11490 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11491 tw32(OTP_CTRL, cmd);
11492
11493 /* Wait for up to 1 ms for command to execute. */
11494 for (i = 0; i < 100; i++) {
11495 val = tr32(OTP_STATUS);
11496 if (val & OTP_STATUS_CMD_DONE)
11497 break;
11498 udelay(10);
11499 }
11500
11501 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11502}
11503
11504/* Read the gphy configuration from the OTP region of the chip. The gphy
11505 * configuration is a 32-bit value that straddles the alignment boundary.
11506 * We do two 32-bit reads and then shift and merge the results.
11507 */
11508static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11509{
11510 u32 bhalf_otp, thalf_otp;
11511
11512 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11513
11514 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11515 return 0;
11516
11517 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11518
11519 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11520 return 0;
11521
11522 thalf_otp = tr32(OTP_READ_DATA);
11523
11524 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11525
11526 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11527 return 0;
11528
11529 bhalf_otp = tr32(OTP_READ_DATA);
11530
11531 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11532}
11533
Michael Chan7d0c41e2005-04-21 17:06:20 -070011534static int __devinit tg3_phy_probe(struct tg3 *tp)
11535{
11536 u32 hw_phy_id_1, hw_phy_id_2;
11537 u32 hw_phy_id, hw_phy_id_masked;
11538 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011539
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011540 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11541 return tg3_phy_init(tp);
11542
Linus Torvalds1da177e2005-04-16 15:20:36 -070011543 /* Reading the PHY ID register can conflict with ASF
11544 * firwmare access to the PHY hardware.
11545 */
11546 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011547 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11548 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011549 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11550 } else {
11551 /* Now read the physical PHY_ID from the chip and verify
11552 * that it is sane. If it doesn't look good, we fall back
11553 * to either the hard-coded table based PHY_ID and failing
11554 * that the value found in the eeprom area.
11555 */
11556 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11557 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11558
11559 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11560 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11561 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11562
11563 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11564 }
11565
11566 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11567 tp->phy_id = hw_phy_id;
11568 if (hw_phy_id_masked == PHY_ID_BCM8002)
11569 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011570 else
11571 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011572 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011573 if (tp->phy_id != PHY_ID_INVALID) {
11574 /* Do nothing, phy ID already set up in
11575 * tg3_get_eeprom_hw_cfg().
11576 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011577 } else {
11578 struct subsys_tbl_ent *p;
11579
11580 /* No eeprom signature? Try the hardcoded
11581 * subsys device table.
11582 */
11583 p = lookup_by_subsys(tp);
11584 if (!p)
11585 return -ENODEV;
11586
11587 tp->phy_id = p->phy_id;
11588 if (!tp->phy_id ||
11589 tp->phy_id == PHY_ID_BCM8002)
11590 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11591 }
11592 }
11593
Michael Chan747e8f82005-07-25 12:33:22 -070011594 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011595 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011596 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011597 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011598
11599 tg3_readphy(tp, MII_BMSR, &bmsr);
11600 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11601 (bmsr & BMSR_LSTATUS))
11602 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011603
Linus Torvalds1da177e2005-04-16 15:20:36 -070011604 err = tg3_phy_reset(tp);
11605 if (err)
11606 return err;
11607
11608 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11609 ADVERTISE_100HALF | ADVERTISE_100FULL |
11610 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11611 tg3_ctrl = 0;
11612 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11613 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11614 MII_TG3_CTRL_ADV_1000_FULL);
11615 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11616 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11617 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11618 MII_TG3_CTRL_ENABLE_AS_MASTER);
11619 }
11620
Michael Chan3600d912006-12-07 00:21:48 -080011621 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11622 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11623 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11624 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011625 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11626
11627 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11628 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11629
11630 tg3_writephy(tp, MII_BMCR,
11631 BMCR_ANENABLE | BMCR_ANRESTART);
11632 }
11633 tg3_phy_set_wirespeed(tp);
11634
11635 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11636 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11637 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11638 }
11639
11640skip_phy_reset:
11641 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11642 err = tg3_init_5401phy_dsp(tp);
11643 if (err)
11644 return err;
11645 }
11646
11647 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11648 err = tg3_init_5401phy_dsp(tp);
11649 }
11650
Michael Chan747e8f82005-07-25 12:33:22 -070011651 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011652 tp->link_config.advertising =
11653 (ADVERTISED_1000baseT_Half |
11654 ADVERTISED_1000baseT_Full |
11655 ADVERTISED_Autoneg |
11656 ADVERTISED_FIBRE);
11657 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11658 tp->link_config.advertising &=
11659 ~(ADVERTISED_1000baseT_Half |
11660 ADVERTISED_1000baseT_Full);
11661
11662 return err;
11663}
11664
11665static void __devinit tg3_read_partno(struct tg3 *tp)
11666{
11667 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011668 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011669 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011670
Michael Chan18201802006-03-20 22:29:15 -080011671 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011672 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011673
Michael Chan18201802006-03-20 22:29:15 -080011674 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011675 for (i = 0; i < 256; i += 4) {
11676 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011677
Michael Chan1b277772006-03-20 22:27:48 -080011678 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11679 goto out_not_found;
11680
11681 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11682 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11683 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11684 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11685 }
11686 } else {
11687 int vpd_cap;
11688
11689 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11690 for (i = 0; i < 256; i += 4) {
11691 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011692 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011693 u16 tmp16;
11694
11695 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11696 i);
11697 while (j++ < 100) {
11698 pci_read_config_word(tp->pdev, vpd_cap +
11699 PCI_VPD_ADDR, &tmp16);
11700 if (tmp16 & 0x8000)
11701 break;
11702 msleep(1);
11703 }
David S. Millerf49639e2006-06-09 11:58:36 -070011704 if (!(tmp16 & 0x8000))
11705 goto out_not_found;
11706
Michael Chan1b277772006-03-20 22:27:48 -080011707 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11708 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011709 v = cpu_to_le32(tmp);
11710 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011711 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011712 }
11713
11714 /* Now parse and find the part number. */
Michael Chanaf2c6a42006-11-07 14:57:51 -080011715 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011716 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011717 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011718
11719 if (val == 0x82 || val == 0x91) {
11720 i = (i + 3 +
11721 (vpd_data[i + 1] +
11722 (vpd_data[i + 2] << 8)));
11723 continue;
11724 }
11725
11726 if (val != 0x90)
11727 goto out_not_found;
11728
11729 block_end = (i + 3 +
11730 (vpd_data[i + 1] +
11731 (vpd_data[i + 2] << 8)));
11732 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011733
11734 if (block_end > 256)
11735 goto out_not_found;
11736
11737 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011738 if (vpd_data[i + 0] == 'P' &&
11739 vpd_data[i + 1] == 'N') {
11740 int partno_len = vpd_data[i + 2];
11741
Michael Chanaf2c6a42006-11-07 14:57:51 -080011742 i += 3;
11743 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011744 goto out_not_found;
11745
11746 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011747 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011748
11749 /* Success. */
11750 return;
11751 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011752 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011753 }
11754
11755 /* Part number not found. */
11756 goto out_not_found;
11757 }
11758
11759out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11761 strcpy(tp->board_part_number, "BCM95906");
11762 else
11763 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011764}
11765
Matt Carlson9c8a6202007-10-21 16:16:08 -070011766static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11767{
11768 u32 val;
11769
11770 if (tg3_nvram_read_swab(tp, offset, &val) ||
11771 (val & 0xfc000000) != 0x0c000000 ||
11772 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11773 val != 0)
11774 return 0;
11775
11776 return 1;
11777}
11778
Michael Chanc4e65752006-03-20 22:29:32 -080011779static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11780{
11781 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011782 u32 ver_offset;
11783 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011784
11785 if (tg3_nvram_read_swab(tp, 0, &val))
11786 return;
11787
11788 if (val != TG3_EEPROM_MAGIC)
11789 return;
11790
11791 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11792 tg3_nvram_read_swab(tp, 0x4, &start))
11793 return;
11794
11795 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011796
11797 if (!tg3_fw_img_is_valid(tp, offset) ||
11798 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011799 return;
11800
Matt Carlson9c8a6202007-10-21 16:16:08 -070011801 offset = offset + ver_offset - start;
11802 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011803 __le32 v;
11804 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011805 return;
11806
Al Virob9fc7dc2007-12-17 22:59:57 -080011807 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011808 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011809
11810 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011811 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011812 return;
11813
11814 for (offset = TG3_NVM_DIR_START;
11815 offset < TG3_NVM_DIR_END;
11816 offset += TG3_NVM_DIRENT_SIZE) {
11817 if (tg3_nvram_read_swab(tp, offset, &val))
11818 return;
11819
11820 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11821 break;
11822 }
11823
11824 if (offset == TG3_NVM_DIR_END)
11825 return;
11826
11827 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11828 start = 0x08000000;
11829 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11830 return;
11831
11832 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11833 !tg3_fw_img_is_valid(tp, offset) ||
11834 tg3_nvram_read_swab(tp, offset + 8, &val))
11835 return;
11836
11837 offset += val - start;
11838
11839 bcnt = strlen(tp->fw_ver);
11840
11841 tp->fw_ver[bcnt++] = ',';
11842 tp->fw_ver[bcnt++] = ' ';
11843
11844 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011845 __le32 v;
11846 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011847 return;
11848
Al Virob9fc7dc2007-12-17 22:59:57 -080011849 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011850
Al Virob9fc7dc2007-12-17 22:59:57 -080011851 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11852 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011853 break;
11854 }
11855
Al Virob9fc7dc2007-12-17 22:59:57 -080011856 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11857 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011858 }
11859
11860 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011861}
11862
Michael Chan7544b092007-05-05 13:08:32 -070011863static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11864
Linus Torvalds1da177e2005-04-16 15:20:36 -070011865static int __devinit tg3_get_invariants(struct tg3 *tp)
11866{
11867 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011868 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11869 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011870 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11871 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011872 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11873 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011874 { },
11875 };
11876 u32 misc_ctrl_reg;
11877 u32 cacheline_sz_reg;
11878 u32 pci_state_reg, grc_misc_cfg;
11879 u32 val;
11880 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011881 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011882
Linus Torvalds1da177e2005-04-16 15:20:36 -070011883 /* Force memory write invalidate off. If we leave it on,
11884 * then on 5700_BX chips we have to enable a workaround.
11885 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11886 * to match the cacheline size. The Broadcom driver have this
11887 * workaround but turns MWI off all the times so never uses
11888 * it. This seems to suggest that the workaround is insufficient.
11889 */
11890 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11891 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11892 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11893
11894 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11895 * has the register indirect write enable bit set before
11896 * we try to access any of the MMIO registers. It is also
11897 * critical that the PCI-X hw workaround situation is decided
11898 * before that as well.
11899 */
11900 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11901 &misc_ctrl_reg);
11902
11903 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11904 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11906 u32 prod_id_asic_rev;
11907
11908 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11909 &prod_id_asic_rev);
11910 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11911 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011912
Michael Chanff645be2005-04-21 17:09:53 -070011913 /* Wrong chip ID in 5752 A0. This code can be removed later
11914 * as A0 is not in production.
11915 */
11916 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11917 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11918
Michael Chan68929142005-08-09 20:17:14 -070011919 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11920 * we need to disable memory and use config. cycles
11921 * only to access all registers. The 5702/03 chips
11922 * can mistakenly decode the special cycles from the
11923 * ICH chipsets as memory write cycles, causing corruption
11924 * of register and memory space. Only certain ICH bridges
11925 * will drive special cycles with non-zero data during the
11926 * address phase which can fall within the 5703's address
11927 * range. This is not an ICH bug as the PCI spec allows
11928 * non-zero address during special cycles. However, only
11929 * these ICH bridges are known to drive non-zero addresses
11930 * during special cycles.
11931 *
11932 * Since special cycles do not cross PCI bridges, we only
11933 * enable this workaround if the 5703 is on the secondary
11934 * bus of these ICH bridges.
11935 */
11936 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11937 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11938 static struct tg3_dev_id {
11939 u32 vendor;
11940 u32 device;
11941 u32 rev;
11942 } ich_chipsets[] = {
11943 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11944 PCI_ANY_ID },
11945 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11946 PCI_ANY_ID },
11947 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11948 0xa },
11949 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11950 PCI_ANY_ID },
11951 { },
11952 };
11953 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11954 struct pci_dev *bridge = NULL;
11955
11956 while (pci_id->vendor != 0) {
11957 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11958 bridge);
11959 if (!bridge) {
11960 pci_id++;
11961 continue;
11962 }
11963 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070011964 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070011965 continue;
11966 }
11967 if (bridge->subordinate &&
11968 (bridge->subordinate->number ==
11969 tp->pdev->bus->number)) {
11970
11971 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11972 pci_dev_put(bridge);
11973 break;
11974 }
11975 }
11976 }
11977
Matt Carlson41588ba2008-04-19 18:12:33 -070011978 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11979 static struct tg3_dev_id {
11980 u32 vendor;
11981 u32 device;
11982 } bridge_chipsets[] = {
11983 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11984 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11985 { },
11986 };
11987 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11988 struct pci_dev *bridge = NULL;
11989
11990 while (pci_id->vendor != 0) {
11991 bridge = pci_get_device(pci_id->vendor,
11992 pci_id->device,
11993 bridge);
11994 if (!bridge) {
11995 pci_id++;
11996 continue;
11997 }
11998 if (bridge->subordinate &&
11999 (bridge->subordinate->number <=
12000 tp->pdev->bus->number) &&
12001 (bridge->subordinate->subordinate >=
12002 tp->pdev->bus->number)) {
12003 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12004 pci_dev_put(bridge);
12005 break;
12006 }
12007 }
12008 }
12009
Michael Chan4a29cc22006-03-19 13:21:12 -080012010 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12011 * DMA addresses > 40-bit. This bridge may have other additional
12012 * 57xx devices behind it in some 4-port NIC designs for example.
12013 * Any tg3 device found behind the bridge will also need the 40-bit
12014 * DMA workaround.
12015 */
Michael Chana4e2b342005-10-26 15:46:52 -070012016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12018 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012019 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012020 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012021 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012022 else {
12023 struct pci_dev *bridge = NULL;
12024
12025 do {
12026 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12027 PCI_DEVICE_ID_SERVERWORKS_EPB,
12028 bridge);
12029 if (bridge && bridge->subordinate &&
12030 (bridge->subordinate->number <=
12031 tp->pdev->bus->number) &&
12032 (bridge->subordinate->subordinate >=
12033 tp->pdev->bus->number)) {
12034 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12035 pci_dev_put(bridge);
12036 break;
12037 }
12038 } while (bridge);
12039 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012040
Linus Torvalds1da177e2005-04-16 15:20:36 -070012041 /* Initialize misc host control in PCI block. */
12042 tp->misc_host_ctrl |= (misc_ctrl_reg &
12043 MISC_HOST_CTRL_CHIPREV);
12044 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12045 tp->misc_host_ctrl);
12046
12047 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12048 &cacheline_sz_reg);
12049
12050 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12051 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12052 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12053 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12054
Michael Chan7544b092007-05-05 13:08:32 -070012055 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12056 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12057 tp->pdev_peer = tg3_find_peer(tp);
12058
John W. Linville2052da92005-04-21 16:56:08 -070012059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012065 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012066 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012067 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012068 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12069
John W. Linville1b440c562005-04-21 17:03:18 -070012070 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12071 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12072 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12073
Michael Chan5a6f3072006-03-20 22:28:05 -080012074 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012075 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12076 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12077 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12078 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12079 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12080 tp->pdev_peer == tp->pdev))
12081 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12082
Michael Chanaf36e6b2006-03-23 01:28:06 -080012083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012089 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012090 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012091 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012092 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012093 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12094 ASIC_REV_5750 &&
12095 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012096 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012097 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012098 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012099
Matt Carlsonf51f3562008-05-25 23:45:08 -070012100 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12101 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012102 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12103
Michael Chanc7835a72006-11-15 21:14:42 -080012104 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12105 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012106 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012107
12108 pcie_set_readrq(tp->pdev, 4096);
12109
Michael Chanc7835a72006-11-15 21:14:42 -080012110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12111 u16 lnkctl;
12112
12113 pci_read_config_word(tp->pdev,
12114 pcie_cap + PCI_EXP_LNKCTL,
12115 &lnkctl);
12116 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12117 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12118 }
12119 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012120
Michael Chan399de502005-10-03 14:02:39 -070012121 /* If we have an AMD 762 or VIA K8T800 chipset, write
12122 * reordering to the mailbox registers done by the host
12123 * controller can cause major troubles. We read back from
12124 * every mailbox register write to force the writes to be
12125 * posted to the chip in order.
12126 */
12127 if (pci_dev_present(write_reorder_chipsets) &&
12128 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12129 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12130
Linus Torvalds1da177e2005-04-16 15:20:36 -070012131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12132 tp->pci_lat_timer < 64) {
12133 tp->pci_lat_timer = 64;
12134
12135 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12136 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12137 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12138 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12139
12140 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12141 cacheline_sz_reg);
12142 }
12143
Matt Carlson9974a352007-10-07 23:27:28 -070012144 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12145 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12146 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12147 if (!tp->pcix_cap) {
12148 printk(KERN_ERR PFX "Cannot find PCI-X "
12149 "capability, aborting.\n");
12150 return -EIO;
12151 }
12152 }
12153
Linus Torvalds1da177e2005-04-16 15:20:36 -070012154 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12155 &pci_state_reg);
12156
Matt Carlson9974a352007-10-07 23:27:28 -070012157 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012158 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12159
12160 /* If this is a 5700 BX chipset, and we are in PCI-X
12161 * mode, enable register write workaround.
12162 *
12163 * The workaround is to use indirect register accesses
12164 * for all chip writes not to mailbox registers.
12165 */
12166 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12167 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012168
12169 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12170
12171 /* The chip can have it's power management PCI config
12172 * space registers clobbered due to this bug.
12173 * So explicitly force the chip into D0 here.
12174 */
Matt Carlson9974a352007-10-07 23:27:28 -070012175 pci_read_config_dword(tp->pdev,
12176 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012177 &pm_reg);
12178 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12179 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012180 pci_write_config_dword(tp->pdev,
12181 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012182 pm_reg);
12183
12184 /* Also, force SERR#/PERR# in PCI command. */
12185 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12186 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12187 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12188 }
12189 }
12190
Michael Chan087fe252005-08-09 20:17:41 -070012191 /* 5700 BX chips need to have their TX producer index mailboxes
12192 * written twice to workaround a bug.
12193 */
12194 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12195 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12196
Linus Torvalds1da177e2005-04-16 15:20:36 -070012197 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12198 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12199 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12200 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12201
12202 /* Chip-specific fixup from Broadcom driver */
12203 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12204 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12205 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12206 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12207 }
12208
Michael Chan1ee582d2005-08-09 20:16:46 -070012209 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012210 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012211 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012212 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012213 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012214 tp->write32_tx_mbox = tg3_write32;
12215 tp->write32_rx_mbox = tg3_write32;
12216
12217 /* Various workaround register access methods */
12218 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12219 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012220 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12221 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12222 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12223 /*
12224 * Back to back register writes can cause problems on these
12225 * chips, the workaround is to read back all reg writes
12226 * except those to mailbox regs.
12227 *
12228 * See tg3_write_indirect_reg32().
12229 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012230 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012231 }
12232
Michael Chan1ee582d2005-08-09 20:16:46 -070012233
12234 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12235 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12236 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12237 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12238 tp->write32_rx_mbox = tg3_write_flush_reg32;
12239 }
Michael Chan20094932005-08-09 20:16:32 -070012240
Michael Chan68929142005-08-09 20:17:14 -070012241 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12242 tp->read32 = tg3_read_indirect_reg32;
12243 tp->write32 = tg3_write_indirect_reg32;
12244 tp->read32_mbox = tg3_read_indirect_mbox;
12245 tp->write32_mbox = tg3_write_indirect_mbox;
12246 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12247 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12248
12249 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012250 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012251
12252 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12253 pci_cmd &= ~PCI_COMMAND_MEMORY;
12254 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12255 }
Michael Chanb5d37722006-09-27 16:06:21 -070012256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12257 tp->read32_mbox = tg3_read32_mbox_5906;
12258 tp->write32_mbox = tg3_write32_mbox_5906;
12259 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12260 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12261 }
Michael Chan68929142005-08-09 20:17:14 -070012262
Michael Chanbbadf502006-04-06 21:46:34 -070012263 if (tp->write32 == tg3_write_indirect_reg32 ||
12264 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12265 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012267 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12268
Michael Chan7d0c41e2005-04-21 17:06:20 -070012269 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012270 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012271 * determined before calling tg3_set_power_state() so that
12272 * we know whether or not to switch out of Vaux power.
12273 * When the flag is set, it means that GPIO1 is used for eeprom
12274 * write protect and also implies that it is a LOM where GPIOs
12275 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012276 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012277 tg3_get_eeprom_hw_cfg(tp);
12278
Matt Carlson0d3031d2007-10-10 18:02:43 -070012279 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12280 /* Allow reads and writes to the
12281 * APE register and memory space.
12282 */
12283 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12284 PCISTATE_ALLOW_APE_SHMEM_WR;
12285 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12286 pci_state_reg);
12287 }
12288
Matt Carlson9936bcf2007-10-10 18:03:07 -070012289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012292 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12293
Michael Chan314fba32005-04-21 17:07:04 -070012294 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12295 * GPIO1 driven high will bring 5700's external PHY out of reset.
12296 * It is also used as eeprom write protect on LOMs.
12297 */
12298 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12299 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12300 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12301 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12302 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012303 /* Unused GPIO3 must be driven as output on 5752 because there
12304 * are no pull-up resistors on unused GPIO pins.
12305 */
12306 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12307 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012308
Michael Chanaf36e6b2006-03-23 01:28:06 -080012309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12310 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12311
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012312 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12313 /* Turn off the debug UART. */
12314 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12315 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12316 /* Keep VMain power. */
12317 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12318 GRC_LCLCTRL_GPIO_OUTPUT0;
12319 }
12320
Linus Torvalds1da177e2005-04-16 15:20:36 -070012321 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012322 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012323 if (err) {
12324 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12325 pci_name(tp->pdev));
12326 return err;
12327 }
12328
12329 /* 5700 B0 chips do not support checksumming correctly due
12330 * to hardware bugs.
12331 */
12332 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12333 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12334
Linus Torvalds1da177e2005-04-16 15:20:36 -070012335 /* Derive initial jumbo mode from MTU assigned in
12336 * ether_setup() via the alloc_etherdev() call
12337 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012338 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012339 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012340 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012341
12342 /* Determine WakeOnLan speed to use. */
12343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12344 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12345 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12346 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12347 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12348 } else {
12349 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12350 }
12351
12352 /* A few boards don't want Ethernet@WireSpeed phy feature */
12353 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12354 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12355 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012356 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012357 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012358 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012359 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12360
12361 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12362 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12363 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12364 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12365 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12366
Michael Chanc424cb22006-04-29 18:56:34 -070012367 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12368 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012370 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012372 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12373 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12374 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012375 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12376 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012377 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12378 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012379 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12380 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012381
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12383 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12384 tp->phy_otp = tg3_read_otp_phycfg(tp);
12385 if (tp->phy_otp == 0)
12386 tp->phy_otp = TG3_OTP_DEFAULT;
12387 }
12388
Matt Carlsonf51f3562008-05-25 23:45:08 -070012389 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012390 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12391 else
12392 tp->mi_mode = MAC_MI_MODE_BASE;
12393
Linus Torvalds1da177e2005-04-16 15:20:36 -070012394 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012395 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12396 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12397 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12398
Matt Carlson57e69832008-05-25 23:48:31 -070012399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12400 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12401
Matt Carlson158d7ab2008-05-29 01:37:54 -070012402 err = tg3_mdio_init(tp);
12403 if (err)
12404 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012405
12406 /* Initialize data/descriptor byte/word swapping. */
12407 val = tr32(GRC_MODE);
12408 val &= GRC_MODE_HOST_STACKUP;
12409 tw32(GRC_MODE, val | tp->grc_mode);
12410
12411 tg3_switch_clocks(tp);
12412
12413 /* Clear this out for sanity. */
12414 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12415
12416 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12417 &pci_state_reg);
12418 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12419 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12420 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12421
12422 if (chiprevid == CHIPREV_ID_5701_A0 ||
12423 chiprevid == CHIPREV_ID_5701_B0 ||
12424 chiprevid == CHIPREV_ID_5701_B2 ||
12425 chiprevid == CHIPREV_ID_5701_B5) {
12426 void __iomem *sram_base;
12427
12428 /* Write some dummy words into the SRAM status block
12429 * area, see if it reads back correctly. If the return
12430 * value is bad, force enable the PCIX workaround.
12431 */
12432 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12433
12434 writel(0x00000000, sram_base);
12435 writel(0x00000000, sram_base + 4);
12436 writel(0xffffffff, sram_base + 4);
12437 if (readl(sram_base) != 0x00000000)
12438 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12439 }
12440 }
12441
12442 udelay(50);
12443 tg3_nvram_init(tp);
12444
12445 grc_misc_cfg = tr32(GRC_MISC_CFG);
12446 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12447
Linus Torvalds1da177e2005-04-16 15:20:36 -070012448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12449 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12450 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12451 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12452
David S. Millerfac9b832005-05-18 22:46:34 -070012453 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12454 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12455 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12456 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12457 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12458 HOSTCC_MODE_CLRTICK_TXBD);
12459
12460 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12461 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12462 tp->misc_host_ctrl);
12463 }
12464
Matt Carlson3bda1252008-08-15 14:08:22 -070012465 /* Preserve the APE MAC_MODE bits */
12466 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12467 tp->mac_mode = tr32(MAC_MODE) |
12468 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12469 else
12470 tp->mac_mode = TG3_DEF_MAC_MODE;
12471
Linus Torvalds1da177e2005-04-16 15:20:36 -070012472 /* these are limited to 10/100 only */
12473 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12474 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12475 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12476 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12477 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12478 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12479 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12480 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12481 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012482 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12483 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012485 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12486
12487 err = tg3_phy_probe(tp);
12488 if (err) {
12489 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12490 pci_name(tp->pdev), err);
12491 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012492 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012493 }
12494
12495 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012496 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012497
12498 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12499 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12500 } else {
12501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12502 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12503 else
12504 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12505 }
12506
12507 /* 5700 {AX,BX} chips have a broken status block link
12508 * change bit implementation, so we must use the
12509 * status register in those cases.
12510 */
12511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12512 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12513 else
12514 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12515
12516 /* The led_ctrl is set during tg3_phy_probe, here we might
12517 * have to force the link status polling mechanism based
12518 * upon subsystem IDs.
12519 */
12520 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012522 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12523 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12524 TG3_FLAG_USE_LINKCHG_REG);
12525 }
12526
12527 /* For all SERDES we poll the MAC status register. */
12528 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12529 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12530 else
12531 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12532
Michael Chan5a6f3072006-03-20 22:28:05 -080012533 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070012534 * straddle the 4GB address boundary in some cases.
12535 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080012536 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012540 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080012542 tp->dev->hard_start_xmit = tg3_start_xmit;
12543 else
12544 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012545
12546 tp->rx_offset = 2;
12547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12548 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12549 tp->rx_offset = 0;
12550
Michael Chanf92905d2006-06-29 20:14:29 -070012551 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12552
12553 /* Increment the rx prod index on the rx std ring by at most
12554 * 8 for these chips to workaround hw errata.
12555 */
12556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12558 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12559 tp->rx_std_max_post = 8;
12560
Matt Carlson8ed5d972007-05-07 00:25:49 -070012561 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12562 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12563 PCIE_PWR_MGMT_L1_THRESH_MSK;
12564
Linus Torvalds1da177e2005-04-16 15:20:36 -070012565 return err;
12566}
12567
David S. Miller49b6e95f2007-03-29 01:38:42 -070012568#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012569static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12570{
12571 struct net_device *dev = tp->dev;
12572 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012573 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012574 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012575 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012576
David S. Miller49b6e95f2007-03-29 01:38:42 -070012577 addr = of_get_property(dp, "local-mac-address", &len);
12578 if (addr && len == 6) {
12579 memcpy(dev->dev_addr, addr, 6);
12580 memcpy(dev->perm_addr, dev->dev_addr, 6);
12581 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012582 }
12583 return -ENODEV;
12584}
12585
12586static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12587{
12588 struct net_device *dev = tp->dev;
12589
12590 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012591 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012592 return 0;
12593}
12594#endif
12595
12596static int __devinit tg3_get_device_address(struct tg3 *tp)
12597{
12598 struct net_device *dev = tp->dev;
12599 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012600 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012601
David S. Miller49b6e95f2007-03-29 01:38:42 -070012602#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012603 if (!tg3_get_macaddr_sparc(tp))
12604 return 0;
12605#endif
12606
12607 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012608 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012609 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012610 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12611 mac_offset = 0xcc;
12612 if (tg3_nvram_lock(tp))
12613 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12614 else
12615 tg3_nvram_unlock(tp);
12616 }
Michael Chanb5d37722006-09-27 16:06:21 -070012617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12618 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012619
12620 /* First try to get it from MAC address mailbox. */
12621 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12622 if ((hi >> 16) == 0x484b) {
12623 dev->dev_addr[0] = (hi >> 8) & 0xff;
12624 dev->dev_addr[1] = (hi >> 0) & 0xff;
12625
12626 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12627 dev->dev_addr[2] = (lo >> 24) & 0xff;
12628 dev->dev_addr[3] = (lo >> 16) & 0xff;
12629 dev->dev_addr[4] = (lo >> 8) & 0xff;
12630 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012631
Michael Chan008652b2006-03-27 23:14:53 -080012632 /* Some old bootcode may report a 0 MAC address in SRAM */
12633 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12634 }
12635 if (!addr_ok) {
12636 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012637 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012638 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12639 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12640 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12641 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12642 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12643 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12644 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12645 }
12646 /* Finally just fetch it out of the MAC control regs. */
12647 else {
12648 hi = tr32(MAC_ADDR_0_HIGH);
12649 lo = tr32(MAC_ADDR_0_LOW);
12650
12651 dev->dev_addr[5] = lo & 0xff;
12652 dev->dev_addr[4] = (lo >> 8) & 0xff;
12653 dev->dev_addr[3] = (lo >> 16) & 0xff;
12654 dev->dev_addr[2] = (lo >> 24) & 0xff;
12655 dev->dev_addr[1] = hi & 0xff;
12656 dev->dev_addr[0] = (hi >> 8) & 0xff;
12657 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012658 }
12659
12660 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012661#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012662 if (!tg3_get_default_macaddr_sparc(tp))
12663 return 0;
12664#endif
12665 return -EINVAL;
12666 }
John W. Linville2ff43692005-09-12 14:44:20 -070012667 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012668 return 0;
12669}
12670
David S. Miller59e6b432005-05-18 22:50:10 -070012671#define BOUNDARY_SINGLE_CACHELINE 1
12672#define BOUNDARY_MULTI_CACHELINE 2
12673
12674static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12675{
12676 int cacheline_size;
12677 u8 byte;
12678 int goal;
12679
12680 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12681 if (byte == 0)
12682 cacheline_size = 1024;
12683 else
12684 cacheline_size = (int) byte * 4;
12685
12686 /* On 5703 and later chips, the boundary bits have no
12687 * effect.
12688 */
12689 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12690 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12691 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12692 goto out;
12693
12694#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12695 goal = BOUNDARY_MULTI_CACHELINE;
12696#else
12697#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12698 goal = BOUNDARY_SINGLE_CACHELINE;
12699#else
12700 goal = 0;
12701#endif
12702#endif
12703
12704 if (!goal)
12705 goto out;
12706
12707 /* PCI controllers on most RISC systems tend to disconnect
12708 * when a device tries to burst across a cache-line boundary.
12709 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12710 *
12711 * Unfortunately, for PCI-E there are only limited
12712 * write-side controls for this, and thus for reads
12713 * we will still get the disconnects. We'll also waste
12714 * these PCI cycles for both read and write for chips
12715 * other than 5700 and 5701 which do not implement the
12716 * boundary bits.
12717 */
12718 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12719 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12720 switch (cacheline_size) {
12721 case 16:
12722 case 32:
12723 case 64:
12724 case 128:
12725 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12726 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12727 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12728 } else {
12729 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12730 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12731 }
12732 break;
12733
12734 case 256:
12735 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12736 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12737 break;
12738
12739 default:
12740 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12741 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12742 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012743 }
David S. Miller59e6b432005-05-18 22:50:10 -070012744 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12745 switch (cacheline_size) {
12746 case 16:
12747 case 32:
12748 case 64:
12749 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12750 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12751 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12752 break;
12753 }
12754 /* fallthrough */
12755 case 128:
12756 default:
12757 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12758 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12759 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012760 }
David S. Miller59e6b432005-05-18 22:50:10 -070012761 } else {
12762 switch (cacheline_size) {
12763 case 16:
12764 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12765 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12766 DMA_RWCTRL_WRITE_BNDRY_16);
12767 break;
12768 }
12769 /* fallthrough */
12770 case 32:
12771 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12772 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12773 DMA_RWCTRL_WRITE_BNDRY_32);
12774 break;
12775 }
12776 /* fallthrough */
12777 case 64:
12778 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12779 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12780 DMA_RWCTRL_WRITE_BNDRY_64);
12781 break;
12782 }
12783 /* fallthrough */
12784 case 128:
12785 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12786 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12787 DMA_RWCTRL_WRITE_BNDRY_128);
12788 break;
12789 }
12790 /* fallthrough */
12791 case 256:
12792 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12793 DMA_RWCTRL_WRITE_BNDRY_256);
12794 break;
12795 case 512:
12796 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12797 DMA_RWCTRL_WRITE_BNDRY_512);
12798 break;
12799 case 1024:
12800 default:
12801 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12802 DMA_RWCTRL_WRITE_BNDRY_1024);
12803 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012804 }
David S. Miller59e6b432005-05-18 22:50:10 -070012805 }
12806
12807out:
12808 return val;
12809}
12810
Linus Torvalds1da177e2005-04-16 15:20:36 -070012811static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12812{
12813 struct tg3_internal_buffer_desc test_desc;
12814 u32 sram_dma_descs;
12815 int i, ret;
12816
12817 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12818
12819 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12820 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12821 tw32(RDMAC_STATUS, 0);
12822 tw32(WDMAC_STATUS, 0);
12823
12824 tw32(BUFMGR_MODE, 0);
12825 tw32(FTQ_RESET, 0);
12826
12827 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12828 test_desc.addr_lo = buf_dma & 0xffffffff;
12829 test_desc.nic_mbuf = 0x00002100;
12830 test_desc.len = size;
12831
12832 /*
12833 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12834 * the *second* time the tg3 driver was getting loaded after an
12835 * initial scan.
12836 *
12837 * Broadcom tells me:
12838 * ...the DMA engine is connected to the GRC block and a DMA
12839 * reset may affect the GRC block in some unpredictable way...
12840 * The behavior of resets to individual blocks has not been tested.
12841 *
12842 * Broadcom noted the GRC reset will also reset all sub-components.
12843 */
12844 if (to_device) {
12845 test_desc.cqid_sqid = (13 << 8) | 2;
12846
12847 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12848 udelay(40);
12849 } else {
12850 test_desc.cqid_sqid = (16 << 8) | 7;
12851
12852 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12853 udelay(40);
12854 }
12855 test_desc.flags = 0x00000005;
12856
12857 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12858 u32 val;
12859
12860 val = *(((u32 *)&test_desc) + i);
12861 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12862 sram_dma_descs + (i * sizeof(u32)));
12863 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12864 }
12865 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12866
12867 if (to_device) {
12868 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12869 } else {
12870 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12871 }
12872
12873 ret = -ENODEV;
12874 for (i = 0; i < 40; i++) {
12875 u32 val;
12876
12877 if (to_device)
12878 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12879 else
12880 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12881 if ((val & 0xffff) == sram_dma_descs) {
12882 ret = 0;
12883 break;
12884 }
12885
12886 udelay(100);
12887 }
12888
12889 return ret;
12890}
12891
David S. Millerded73402005-05-23 13:59:47 -070012892#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012893
12894static int __devinit tg3_test_dma(struct tg3 *tp)
12895{
12896 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012897 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012898 int ret;
12899
12900 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12901 if (!buf) {
12902 ret = -ENOMEM;
12903 goto out_nofree;
12904 }
12905
12906 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12907 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12908
David S. Miller59e6b432005-05-18 22:50:10 -070012909 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012910
12911 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12912 /* DMA read watermark not used on PCIE */
12913 tp->dma_rwctrl |= 0x00180000;
12914 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012917 tp->dma_rwctrl |= 0x003f0000;
12918 else
12919 tp->dma_rwctrl |= 0x003f000f;
12920 } else {
12921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12923 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080012924 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012925
Michael Chan4a29cc22006-03-19 13:21:12 -080012926 /* If the 5704 is behind the EPB bridge, we can
12927 * do the less restrictive ONE_DMA workaround for
12928 * better performance.
12929 */
12930 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12932 tp->dma_rwctrl |= 0x8000;
12933 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012934 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12935
Michael Chan49afdeb2007-02-13 12:17:03 -080012936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12937 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070012938 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080012939 tp->dma_rwctrl |=
12940 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12941 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12942 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070012943 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12944 /* 5780 always in PCIX mode */
12945 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070012946 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12947 /* 5714 always in PCIX mode */
12948 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012949 } else {
12950 tp->dma_rwctrl |= 0x001b000f;
12951 }
12952 }
12953
12954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12956 tp->dma_rwctrl &= 0xfffffff0;
12957
12958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12960 /* Remove this if it causes problems for some boards. */
12961 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12962
12963 /* On 5700/5701 chips, we need to set this bit.
12964 * Otherwise the chip will issue cacheline transactions
12965 * to streamable DMA memory with not all the byte
12966 * enables turned on. This is an error on several
12967 * RISC PCI controllers, in particular sparc64.
12968 *
12969 * On 5703/5704 chips, this bit has been reassigned
12970 * a different meaning. In particular, it is used
12971 * on those chips to enable a PCI-X workaround.
12972 */
12973 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12974 }
12975
12976 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12977
12978#if 0
12979 /* Unneeded, already done by tg3_get_invariants. */
12980 tg3_switch_clocks(tp);
12981#endif
12982
12983 ret = 0;
12984 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12985 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12986 goto out;
12987
David S. Miller59e6b432005-05-18 22:50:10 -070012988 /* It is best to perform DMA test with maximum write burst size
12989 * to expose the 5700/5701 write DMA bug.
12990 */
12991 saved_dma_rwctrl = tp->dma_rwctrl;
12992 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12993 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12994
Linus Torvalds1da177e2005-04-16 15:20:36 -070012995 while (1) {
12996 u32 *p = buf, i;
12997
12998 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12999 p[i] = i;
13000
13001 /* Send the buffer to the chip. */
13002 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13003 if (ret) {
13004 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13005 break;
13006 }
13007
13008#if 0
13009 /* validate data reached card RAM correctly. */
13010 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13011 u32 val;
13012 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13013 if (le32_to_cpu(val) != p[i]) {
13014 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13015 /* ret = -ENODEV here? */
13016 }
13017 p[i] = 0;
13018 }
13019#endif
13020 /* Now read it back. */
13021 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13022 if (ret) {
13023 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13024
13025 break;
13026 }
13027
13028 /* Verify it. */
13029 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13030 if (p[i] == i)
13031 continue;
13032
David S. Miller59e6b432005-05-18 22:50:10 -070013033 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13034 DMA_RWCTRL_WRITE_BNDRY_16) {
13035 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013036 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13037 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13038 break;
13039 } else {
13040 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13041 ret = -ENODEV;
13042 goto out;
13043 }
13044 }
13045
13046 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13047 /* Success. */
13048 ret = 0;
13049 break;
13050 }
13051 }
David S. Miller59e6b432005-05-18 22:50:10 -070013052 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13053 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013054 static struct pci_device_id dma_wait_state_chipsets[] = {
13055 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13056 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13057 { },
13058 };
13059
David S. Miller59e6b432005-05-18 22:50:10 -070013060 /* DMA test passed without adjusting DMA boundary,
Michael Chan6d1cfba2005-06-08 14:13:14 -070013061 * now look for chipsets that are known to expose the
13062 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013063 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013064 if (pci_dev_present(dma_wait_state_chipsets)) {
13065 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13066 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13067 }
13068 else
13069 /* Safe to use the calculated DMA boundary. */
13070 tp->dma_rwctrl = saved_dma_rwctrl;
13071
David S. Miller59e6b432005-05-18 22:50:10 -070013072 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13073 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013074
13075out:
13076 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13077out_nofree:
13078 return ret;
13079}
13080
13081static void __devinit tg3_init_link_config(struct tg3 *tp)
13082{
13083 tp->link_config.advertising =
13084 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13085 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13086 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13087 ADVERTISED_Autoneg | ADVERTISED_MII);
13088 tp->link_config.speed = SPEED_INVALID;
13089 tp->link_config.duplex = DUPLEX_INVALID;
13090 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013091 tp->link_config.active_speed = SPEED_INVALID;
13092 tp->link_config.active_duplex = DUPLEX_INVALID;
13093 tp->link_config.phy_is_low_power = 0;
13094 tp->link_config.orig_speed = SPEED_INVALID;
13095 tp->link_config.orig_duplex = DUPLEX_INVALID;
13096 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13097}
13098
13099static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13100{
Michael Chanfdfec172005-07-25 12:31:48 -070013101 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13102 tp->bufmgr_config.mbuf_read_dma_low_water =
13103 DEFAULT_MB_RDMA_LOW_WATER_5705;
13104 tp->bufmgr_config.mbuf_mac_rx_low_water =
13105 DEFAULT_MB_MACRX_LOW_WATER_5705;
13106 tp->bufmgr_config.mbuf_high_water =
13107 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13109 tp->bufmgr_config.mbuf_mac_rx_low_water =
13110 DEFAULT_MB_MACRX_LOW_WATER_5906;
13111 tp->bufmgr_config.mbuf_high_water =
13112 DEFAULT_MB_HIGH_WATER_5906;
13113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013114
Michael Chanfdfec172005-07-25 12:31:48 -070013115 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13116 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13117 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13118 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13119 tp->bufmgr_config.mbuf_high_water_jumbo =
13120 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13121 } else {
13122 tp->bufmgr_config.mbuf_read_dma_low_water =
13123 DEFAULT_MB_RDMA_LOW_WATER;
13124 tp->bufmgr_config.mbuf_mac_rx_low_water =
13125 DEFAULT_MB_MACRX_LOW_WATER;
13126 tp->bufmgr_config.mbuf_high_water =
13127 DEFAULT_MB_HIGH_WATER;
13128
13129 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13130 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13131 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13132 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13133 tp->bufmgr_config.mbuf_high_water_jumbo =
13134 DEFAULT_MB_HIGH_WATER_JUMBO;
13135 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013136
13137 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13138 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13139}
13140
13141static char * __devinit tg3_phy_string(struct tg3 *tp)
13142{
13143 switch (tp->phy_id & PHY_ID_MASK) {
13144 case PHY_ID_BCM5400: return "5400";
13145 case PHY_ID_BCM5401: return "5401";
13146 case PHY_ID_BCM5411: return "5411";
13147 case PHY_ID_BCM5701: return "5701";
13148 case PHY_ID_BCM5703: return "5703";
13149 case PHY_ID_BCM5704: return "5704";
13150 case PHY_ID_BCM5705: return "5705";
13151 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013152 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013153 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013154 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013155 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013156 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013157 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013158 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013159 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013160 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013161 case PHY_ID_BCM8002: return "8002/serdes";
13162 case 0: return "serdes";
13163 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013165}
13166
Michael Chanf9804dd2005-09-27 12:13:10 -070013167static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13168{
13169 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13170 strcpy(str, "PCI Express");
13171 return str;
13172 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13173 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13174
13175 strcpy(str, "PCIX:");
13176
13177 if ((clock_ctrl == 7) ||
13178 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13179 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13180 strcat(str, "133MHz");
13181 else if (clock_ctrl == 0)
13182 strcat(str, "33MHz");
13183 else if (clock_ctrl == 2)
13184 strcat(str, "50MHz");
13185 else if (clock_ctrl == 4)
13186 strcat(str, "66MHz");
13187 else if (clock_ctrl == 6)
13188 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013189 } else {
13190 strcpy(str, "PCI:");
13191 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13192 strcat(str, "66MHz");
13193 else
13194 strcat(str, "33MHz");
13195 }
13196 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13197 strcat(str, ":32-bit");
13198 else
13199 strcat(str, ":64-bit");
13200 return str;
13201}
13202
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013203static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013204{
13205 struct pci_dev *peer;
13206 unsigned int func, devnr = tp->pdev->devfn & ~7;
13207
13208 for (func = 0; func < 8; func++) {
13209 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13210 if (peer && peer != tp->pdev)
13211 break;
13212 pci_dev_put(peer);
13213 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013214 /* 5704 can be configured in single-port mode, set peer to
13215 * tp->pdev in that case.
13216 */
13217 if (!peer) {
13218 peer = tp->pdev;
13219 return peer;
13220 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013221
13222 /*
13223 * We don't need to keep the refcount elevated; there's no way
13224 * to remove one half of this device without removing the other
13225 */
13226 pci_dev_put(peer);
13227
13228 return peer;
13229}
13230
David S. Miller15f98502005-05-18 22:49:26 -070013231static void __devinit tg3_init_coal(struct tg3 *tp)
13232{
13233 struct ethtool_coalesce *ec = &tp->coal;
13234
13235 memset(ec, 0, sizeof(*ec));
13236 ec->cmd = ETHTOOL_GCOALESCE;
13237 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13238 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13239 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13240 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13241 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13242 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13243 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13244 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13245 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13246
13247 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13248 HOSTCC_MODE_CLRTICK_TXBD)) {
13249 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13250 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13251 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13252 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13253 }
Michael Chand244c892005-07-05 14:42:33 -070013254
13255 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13256 ec->rx_coalesce_usecs_irq = 0;
13257 ec->tx_coalesce_usecs_irq = 0;
13258 ec->stats_block_coalesce_usecs = 0;
13259 }
David S. Miller15f98502005-05-18 22:49:26 -070013260}
13261
Linus Torvalds1da177e2005-04-16 15:20:36 -070013262static int __devinit tg3_init_one(struct pci_dev *pdev,
13263 const struct pci_device_id *ent)
13264{
13265 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013266 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013267 struct net_device *dev;
13268 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013269 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013270 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013271 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013272
13273 if (tg3_version_printed++ == 0)
13274 printk(KERN_INFO "%s", version);
13275
13276 err = pci_enable_device(pdev);
13277 if (err) {
13278 printk(KERN_ERR PFX "Cannot enable PCI device, "
13279 "aborting.\n");
13280 return err;
13281 }
13282
Matt Carlson63532392008-11-03 16:49:57 -080013283 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013284 printk(KERN_ERR PFX "Cannot find proper PCI device "
13285 "base address, aborting.\n");
13286 err = -ENODEV;
13287 goto err_out_disable_pdev;
13288 }
13289
13290 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13291 if (err) {
13292 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13293 "aborting.\n");
13294 goto err_out_disable_pdev;
13295 }
13296
13297 pci_set_master(pdev);
13298
13299 /* Find power-management capability. */
13300 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13301 if (pm_cap == 0) {
13302 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13303 "aborting.\n");
13304 err = -EIO;
13305 goto err_out_free_res;
13306 }
13307
Linus Torvalds1da177e2005-04-16 15:20:36 -070013308 dev = alloc_etherdev(sizeof(*tp));
13309 if (!dev) {
13310 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13311 err = -ENOMEM;
13312 goto err_out_free_res;
13313 }
13314
Linus Torvalds1da177e2005-04-16 15:20:36 -070013315 SET_NETDEV_DEV(dev, &pdev->dev);
13316
Linus Torvalds1da177e2005-04-16 15:20:36 -070013317#if TG3_VLAN_TAG_USED
13318 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13319 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013320#endif
13321
13322 tp = netdev_priv(dev);
13323 tp->pdev = pdev;
13324 tp->dev = dev;
13325 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013326 tp->rx_mode = TG3_DEF_RX_MODE;
13327 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013328
Linus Torvalds1da177e2005-04-16 15:20:36 -070013329 if (tg3_debug > 0)
13330 tp->msg_enable = tg3_debug;
13331 else
13332 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13333
13334 /* The word/byte swap controls here control register access byte
13335 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13336 * setting below.
13337 */
13338 tp->misc_host_ctrl =
13339 MISC_HOST_CTRL_MASK_PCI_INT |
13340 MISC_HOST_CTRL_WORD_SWAP |
13341 MISC_HOST_CTRL_INDIR_ACCESS |
13342 MISC_HOST_CTRL_PCISTATE_RW;
13343
13344 /* The NONFRM (non-frame) byte/word swap controls take effect
13345 * on descriptor entries, anything which isn't packet data.
13346 *
13347 * The StrongARM chips on the board (one for tx, one for rx)
13348 * are running in big-endian mode.
13349 */
13350 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13351 GRC_MODE_WSWAP_NONFRM_DATA);
13352#ifdef __BIG_ENDIAN
13353 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13354#endif
13355 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013356 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013357 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013358
Matt Carlson63532392008-11-03 16:49:57 -080013359 dev->mem_start = pci_resource_start(pdev, BAR_0);
13360 tg3reg_len = pci_resource_len(pdev, BAR_0);
13361 dev->mem_end = dev->mem_start + tg3reg_len;
13362
13363 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013364 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013365 printk(KERN_ERR PFX "Cannot map device registers, "
13366 "aborting.\n");
13367 err = -ENOMEM;
13368 goto err_out_free_dev;
13369 }
13370
13371 tg3_init_link_config(tp);
13372
Linus Torvalds1da177e2005-04-16 15:20:36 -070013373 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13374 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13375 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13376
13377 dev->open = tg3_open;
13378 dev->stop = tg3_close;
13379 dev->get_stats = tg3_get_stats;
13380 dev->set_multicast_list = tg3_set_rx_mode;
13381 dev->set_mac_address = tg3_set_mac_addr;
13382 dev->do_ioctl = tg3_ioctl;
13383 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013384 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013385 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013386 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13387 dev->change_mtu = tg3_change_mtu;
13388 dev->irq = pdev->irq;
13389#ifdef CONFIG_NET_POLL_CONTROLLER
13390 dev->poll_controller = tg3_poll_controller;
13391#endif
13392
13393 err = tg3_get_invariants(tp);
13394 if (err) {
13395 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13396 "aborting.\n");
13397 goto err_out_iounmap;
13398 }
13399
Michael Chan4a29cc22006-03-19 13:21:12 -080013400 /* The EPB bridge inside 5714, 5715, and 5780 and any
13401 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013402 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13403 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13404 * do DMA address check in tg3_start_xmit().
13405 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013406 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13407 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13408 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013409 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13410#ifdef CONFIG_HIGHMEM
13411 dma_mask = DMA_64BIT_MASK;
13412#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013413 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013414 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13415
13416 /* Configure DMA attributes. */
13417 if (dma_mask > DMA_32BIT_MASK) {
13418 err = pci_set_dma_mask(pdev, dma_mask);
13419 if (!err) {
13420 dev->features |= NETIF_F_HIGHDMA;
13421 err = pci_set_consistent_dma_mask(pdev,
13422 persist_dma_mask);
13423 if (err < 0) {
13424 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13425 "DMA for consistent allocations\n");
13426 goto err_out_iounmap;
13427 }
13428 }
13429 }
13430 if (err || dma_mask == DMA_32BIT_MASK) {
13431 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13432 if (err) {
13433 printk(KERN_ERR PFX "No usable DMA configuration, "
13434 "aborting.\n");
13435 goto err_out_iounmap;
13436 }
13437 }
13438
Michael Chanfdfec172005-07-25 12:31:48 -070013439 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013440
Linus Torvalds1da177e2005-04-16 15:20:36 -070013441 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13442 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13443 }
13444 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13446 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013448 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13449 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13450 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013451 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013452 }
13453
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013454 /* TSO is on by default on chips that support hardware TSO.
13455 * Firmware TSO on older chips gives lower performance, so it
13456 * is off by default, but can be enabled using ethtool.
13457 */
Michael Chanb0026622006-07-03 19:42:14 -070013458 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013459 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013460 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13461 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013462 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013463 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13464 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13465 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13466 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013467 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013468 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013469
Linus Torvalds1da177e2005-04-16 15:20:36 -070013470
13471 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13472 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13473 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13474 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13475 tp->rx_pending = 63;
13476 }
13477
Linus Torvalds1da177e2005-04-16 15:20:36 -070013478 err = tg3_get_device_address(tp);
13479 if (err) {
13480 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13481 "aborting.\n");
13482 goto err_out_iounmap;
13483 }
13484
Matt Carlson0d3031d2007-10-10 18:02:43 -070013485 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013486 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013487 printk(KERN_ERR PFX "Cannot find proper PCI device "
13488 "base address for APE, aborting.\n");
13489 err = -ENODEV;
13490 goto err_out_iounmap;
13491 }
13492
Matt Carlson63532392008-11-03 16:49:57 -080013493 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013494 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013495 printk(KERN_ERR PFX "Cannot map APE registers, "
13496 "aborting.\n");
13497 err = -ENOMEM;
13498 goto err_out_iounmap;
13499 }
13500
13501 tg3_ape_lock_init(tp);
13502 }
13503
Matt Carlsonc88864d2007-11-12 21:07:01 -080013504 /*
13505 * Reset chip in case UNDI or EFI driver did not shutdown
13506 * DMA self test will enable WDMAC and we'll see (spurious)
13507 * pending DMA on the PCI bus at that point.
13508 */
13509 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13510 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13511 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13512 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13513 }
13514
13515 err = tg3_test_dma(tp);
13516 if (err) {
13517 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13518 goto err_out_apeunmap;
13519 }
13520
13521 /* Tigon3 can do ipv4 only... and some chips have buggy
13522 * checksumming.
13523 */
13524 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13525 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013529 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013531 dev->features |= NETIF_F_IPV6_CSUM;
13532
13533 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13534 } else
13535 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13536
13537 /* flow control autonegotiation is default behavior */
13538 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013539 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013540
13541 tg3_init_coal(tp);
13542
Michael Chanc49a1562006-12-17 17:07:29 -080013543 pci_set_drvdata(pdev, dev);
13544
Linus Torvalds1da177e2005-04-16 15:20:36 -070013545 err = register_netdev(dev);
13546 if (err) {
13547 printk(KERN_ERR PFX "Cannot register net device, "
13548 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013549 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013550 }
13551
Matt Carlsondf59c942008-11-03 16:52:56 -080013552 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013553 dev->name,
13554 tp->board_part_number,
13555 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013556 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013557 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013558
Matt Carlsondf59c942008-11-03 16:52:56 -080013559 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13560 printk(KERN_INFO
13561 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13562 tp->dev->name,
13563 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13564 tp->mdio_bus->phy_map[PHY_ADDR]->dev.bus_id);
13565 else
13566 printk(KERN_INFO
13567 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13568 tp->dev->name, tg3_phy_string(tp),
13569 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13570 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13571 "10/100/1000Base-T")),
13572 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13573
13574 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013575 dev->name,
13576 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13577 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13578 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13579 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013580 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013581 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13582 dev->name, tp->dma_rwctrl,
13583 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13584 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013585
13586 return 0;
13587
Matt Carlson0d3031d2007-10-10 18:02:43 -070013588err_out_apeunmap:
13589 if (tp->aperegs) {
13590 iounmap(tp->aperegs);
13591 tp->aperegs = NULL;
13592 }
13593
Linus Torvalds1da177e2005-04-16 15:20:36 -070013594err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013595 if (tp->regs) {
13596 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013597 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013598 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013599
13600err_out_free_dev:
13601 free_netdev(dev);
13602
13603err_out_free_res:
13604 pci_release_regions(pdev);
13605
13606err_out_disable_pdev:
13607 pci_disable_device(pdev);
13608 pci_set_drvdata(pdev, NULL);
13609 return err;
13610}
13611
13612static void __devexit tg3_remove_one(struct pci_dev *pdev)
13613{
13614 struct net_device *dev = pci_get_drvdata(pdev);
13615
13616 if (dev) {
13617 struct tg3 *tp = netdev_priv(dev);
13618
Michael Chan7faa0062006-02-02 17:29:28 -080013619 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013620
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013621 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13622 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013623 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013624 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013625
Linus Torvalds1da177e2005-04-16 15:20:36 -070013626 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013627 if (tp->aperegs) {
13628 iounmap(tp->aperegs);
13629 tp->aperegs = NULL;
13630 }
Michael Chan68929142005-08-09 20:17:14 -070013631 if (tp->regs) {
13632 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013633 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013634 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013635 free_netdev(dev);
13636 pci_release_regions(pdev);
13637 pci_disable_device(pdev);
13638 pci_set_drvdata(pdev, NULL);
13639 }
13640}
13641
13642static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13643{
13644 struct net_device *dev = pci_get_drvdata(pdev);
13645 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013646 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013647 int err;
13648
Michael Chan3e0c95f2007-08-03 20:56:54 -070013649 /* PCI register 4 needs to be saved whether netif_running() or not.
13650 * MSI address and data need to be saved if using MSI and
13651 * netif_running().
13652 */
13653 pci_save_state(pdev);
13654
Linus Torvalds1da177e2005-04-16 15:20:36 -070013655 if (!netif_running(dev))
13656 return 0;
13657
Michael Chan7faa0062006-02-02 17:29:28 -080013658 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013659 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013660 tg3_netif_stop(tp);
13661
13662 del_timer_sync(&tp->timer);
13663
David S. Millerf47c11e2005-06-24 20:18:35 -070013664 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013665 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013666 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013667
13668 netif_device_detach(dev);
13669
David S. Millerf47c11e2005-06-24 20:18:35 -070013670 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013671 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013672 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013673 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013674
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013675 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13676
13677 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013678 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013679 int err2;
13680
David S. Millerf47c11e2005-06-24 20:18:35 -070013681 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013682
Michael Chan6a9eba12005-12-13 21:08:58 -080013683 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013684 err2 = tg3_restart_hw(tp, 1);
13685 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013686 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013687
13688 tp->timer.expires = jiffies + tp->timer_offset;
13689 add_timer(&tp->timer);
13690
13691 netif_device_attach(dev);
13692 tg3_netif_start(tp);
13693
Michael Chanb9ec6c12006-07-25 16:37:27 -070013694out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013695 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013696
13697 if (!err2)
13698 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013699 }
13700
13701 return err;
13702}
13703
13704static int tg3_resume(struct pci_dev *pdev)
13705{
13706 struct net_device *dev = pci_get_drvdata(pdev);
13707 struct tg3 *tp = netdev_priv(dev);
13708 int err;
13709
Michael Chan3e0c95f2007-08-03 20:56:54 -070013710 pci_restore_state(tp->pdev);
13711
Linus Torvalds1da177e2005-04-16 15:20:36 -070013712 if (!netif_running(dev))
13713 return 0;
13714
Michael Chanbc1c7562006-03-20 17:48:03 -080013715 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013716 if (err)
13717 return err;
13718
13719 netif_device_attach(dev);
13720
David S. Millerf47c11e2005-06-24 20:18:35 -070013721 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013722
Michael Chan6a9eba12005-12-13 21:08:58 -080013723 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013724 err = tg3_restart_hw(tp, 1);
13725 if (err)
13726 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013727
13728 tp->timer.expires = jiffies + tp->timer_offset;
13729 add_timer(&tp->timer);
13730
Linus Torvalds1da177e2005-04-16 15:20:36 -070013731 tg3_netif_start(tp);
13732
Michael Chanb9ec6c12006-07-25 16:37:27 -070013733out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013734 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013735
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013736 if (!err)
13737 tg3_phy_start(tp);
13738
Michael Chanb9ec6c12006-07-25 16:37:27 -070013739 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013740}
13741
13742static struct pci_driver tg3_driver = {
13743 .name = DRV_MODULE_NAME,
13744 .id_table = tg3_pci_tbl,
13745 .probe = tg3_init_one,
13746 .remove = __devexit_p(tg3_remove_one),
13747 .suspend = tg3_suspend,
13748 .resume = tg3_resume
13749};
13750
13751static int __init tg3_init(void)
13752{
Jeff Garzik29917622006-08-19 17:48:59 -040013753 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013754}
13755
13756static void __exit tg3_cleanup(void)
13757{
13758 pci_unregister_driver(&tg3_driver);
13759}
13760
13761module_init(tg3_init);
13762module_exit(tg3_cleanup);