/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.94"
#define DRV_MODULE_RELDATE	"August 14, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

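/* Basic register accessors.  The read32/write32/write32_mbox function
 * pointers in struct tg3 are set at probe time to one of the direct,
 * flush-after-write or indirect variants below, depending on which
 * hardware bug workarounds the particular chip needs.
 */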
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

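/* Indirect register access: the target register offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is then
 * transferred through TG3PCI_REG_DATA, serialized by tp->indirect_lock.
 */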
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

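/* TX mailbox writes.  Chips with TG3_FLAG_TXD_MBOX_HWBUG need the producer
 * index written twice, and a read-back is used to flush posted writes when
 * the host bridge may reorder mailbox writes.
 */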
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

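/* Accessors for NIC on-chip SRAM.  A 32-bit memory window is selected
 * through TG3PCI_MEM_WIN_BASE_ADDR (via PCI config cycles when
 * TG3_FLAG_SRAM_USE_CONFIG is set) and is always returned to zero
 * afterwards.  The 5906 statistics block region is not accessible.
 */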
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

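/* Acquire one of the hardware semaphores shared with the APE management
 * firmware.  The grant register is polled for up to 1 ms; on timeout the
 * request is revoked and -EBUSY is returned.
 */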
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

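/* MII register access through the MAC's MI (MDIO) interface: a command
 * frame is written to MAC_MI_COM and MI_COM_BUSY is polled until the
 * transaction completes.  Hardware auto-polling is temporarily disabled
 * around the access and restored afterwards.
 */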
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

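/* Configure the MAC side of an RGMII-attached PHY: MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE are programmed to match the in-band or out-of-band
 * status signalling selected by the TG3_FLG3_RGMII_* flags.
 */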
static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		return i;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	switch (phydev->phy_id) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tg3_mdio_config(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

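/* Resolve the pause configuration for the current link (either from the
 * autoneg results or from the forced settings) and program the MAC RX/TX
 * flow-control enables to match.
 */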
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

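/* phylib link-change callback.  Takes tp->lock, brings MAC_MODE,
 * MAC_TX_LENGTHS and flow control in line with the speed/duplex/pause
 * state reported by the PHY, and logs the link state when it changes.
 */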
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

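/* Enable or disable automatic MDI crossover.  The 5906 uses the EPHY
 * shadow register set; other 5705+ copper chips use the AUXCTL MISC
 * shadow register.
 */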
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

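/* Program PHY DSP coefficients from the one-time-programmable (OTP) value
 * saved in tp->phy_otp, with the SM_DSP clock enabled for the duration of
 * the writes.
 */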
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

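/* Write a known test pattern into each of the four DSP TAP channels and
 * read it back.  Timeouts flag that another PHY reset is needed; any
 * mismatch or timeout returns -EBUSY.
 */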
1522static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1523{
1524 static const u32 test_pat[4][6] = {
1525 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1526 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1527 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1528 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1529 };
1530 int chan;
1531
1532 for (chan = 0; chan < 4; chan++) {
1533 int i;
1534
1535 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1536 (chan * 0x2000) | 0x0200);
1537 tg3_writephy(tp, 0x16, 0x0002);
1538
1539 for (i = 0; i < 6; i++)
1540 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1541 test_pat[chan][i]);
1542
1543 tg3_writephy(tp, 0x16, 0x0202);
1544 if (tg3_wait_macro_done(tp)) {
1545 *resetp = 1;
1546 return -EBUSY;
1547 }
1548
1549 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1550 (chan * 0x2000) | 0x0200);
1551 tg3_writephy(tp, 0x16, 0x0082);
1552 if (tg3_wait_macro_done(tp)) {
1553 *resetp = 1;
1554 return -EBUSY;
1555 }
1556
1557 tg3_writephy(tp, 0x16, 0x0802);
1558 if (tg3_wait_macro_done(tp)) {
1559 *resetp = 1;
1560 return -EBUSY;
1561 }
1562
1563 for (i = 0; i < 6; i += 2) {
1564 u32 low, high;
1565
1566 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1567 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1568 tg3_wait_macro_done(tp)) {
1569 *resetp = 1;
1570 return -EBUSY;
1571 }
1572 low &= 0x7fff;
1573 high &= 0x000f;
1574 if (low != test_pat[chan][i] ||
1575 high != test_pat[chan][i+1]) {
1576 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1577 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1578 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1579
1580 return -EBUSY;
1581 }
1582 }
1583 }
1584
1585 return 0;
1586}
1587
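/* Clear the test pattern from all four DSP channels. */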
1588static int tg3_phy_reset_chanpat(struct tg3 *tp)
1589{
1590 int chan;
1591
1592 for (chan = 0; chan < 4; chan++) {
1593 int i;
1594
1595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1596 (chan * 0x2000) | 0x0200);
1597 tg3_writephy(tp, 0x16, 0x0002);
1598 for (i = 0; i < 6; i++)
1599 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1600 tg3_writephy(tp, 0x16, 0x0202);
1601 if (tg3_wait_macro_done(tp))
1602 return -EBUSY;
1603 }
1604
1605 return 0;
1606}
1607
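/* Reset workaround for 5703/5704/5705 PHYs: retry the BMCR reset and
 * DSP test-pattern sequence until the pattern reads back correctly,
 * then restore the transmitter, interrupt and master-mode settings.
 */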
1608static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1609{
1610 u32 reg32, phy9_orig;
1611 int retries, do_phy_reset, err;
1612
1613 retries = 10;
1614 do_phy_reset = 1;
1615 do {
1616 if (do_phy_reset) {
1617 err = tg3_bmcr_reset(tp);
1618 if (err)
1619 return err;
1620 do_phy_reset = 0;
1621 }
1622
1623 /* Disable transmitter and interrupt. */
1624 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1625 continue;
1626
1627 reg32 |= 0x3000;
1628 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1629
1630 /* Set full-duplex, 1000 mbps. */
1631 tg3_writephy(tp, MII_BMCR,
1632 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1633
1634 /* Set to master mode. */
1635 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1636 continue;
1637
1638 tg3_writephy(tp, MII_TG3_CTRL,
1639 (MII_TG3_CTRL_AS_MASTER |
1640 MII_TG3_CTRL_ENABLE_AS_MASTER));
1641
1642 /* Enable SM_DSP_CLOCK and 6dB. */
1643 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1644
1645 /* Block the PHY control access. */
1646 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1647 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1648
1649 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1650 if (!err)
1651 break;
1652 } while (--retries);
1653
1654 err = tg3_phy_reset_chanpat(tp);
1655 if (err)
1656 return err;
1657
1658 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1659 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1660
1661 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1662 tg3_writephy(tp, 0x16, 0x0000);
1663
1664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1666 /* Set Extended packet length bit for jumbo frames */
1667 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1668 }
1669 else {
1670 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1671 }
1672
1673 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1674
1675 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1676 reg32 &= ~0x3000;
1677 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1678 } else if (!err)
1679 err = -EBUSY;
1680
1681 return err;
1682}
1683
1684/* Reset the tigon3 PHY and apply the chip-specific workarounds
1685 * that must follow a PHY reset.
1686 */
1687static int tg3_phy_reset(struct tg3 *tp)
1688{
1689 u32 cpmuctrl;
1690 u32 phy_status;
1691 int err;
1692
1693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1694 u32 val;
1695
1696 val = tr32(GRC_MISC_CFG);
1697 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1698 udelay(40);
1699 }
1700 err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1701 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1702 if (err != 0)
1703 return -EBUSY;
1704
1705 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1706 netif_carrier_off(tp->dev);
1707 tg3_link_report(tp);
1708 }
1709
1710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1711 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1713 err = tg3_phy_reset_5703_4_5(tp);
1714 if (err)
1715 return err;
1716 goto out;
1717 }
1718
1719 cpmuctrl = 0;
1720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1721 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1722 cpmuctrl = tr32(TG3_CPMU_CTRL);
1723 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1724 tw32(TG3_CPMU_CTRL,
1725 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1726 }
1727
1728 err = tg3_bmcr_reset(tp);
1729 if (err)
1730 return err;
1731
1732 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1733 u32 phy;
1734
1735 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1736 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1737
1738 tw32(TG3_CPMU_CTRL, cpmuctrl);
1739 }
1740
1741 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1742     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1743 u32 val;
1744
1745 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1746 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1747 CPMU_LSPD_1000MB_MACCLK_12_5) {
1748 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1749 udelay(40);
1750 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1751 }
1752
1753 /* Disable GPHY autopowerdown. */
1754 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1755 MII_TG3_MISC_SHDW_WREN |
1756 MII_TG3_MISC_SHDW_APD_SEL |
1757 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1758 }
1759
1760 tg3_phy_apply_otp(tp);
1761
1762out:
1763 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1764 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1765 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1766 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1767 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1768 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1769 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1770 }
1771 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1772 tg3_writephy(tp, 0x1c, 0x8d68);
1773 tg3_writephy(tp, 0x1c, 0x8d68);
1774 }
1775 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1776 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1777 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1778 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1779 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1780 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1781 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1782 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1783 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1784 }
1785 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1786 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1787 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1788 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1789 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1790 tg3_writephy(tp, MII_TG3_TEST1,
1791     MII_TG3_TEST1_TRIM_EN | 0x4);
1792 } else
1793 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1794 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1795 }
1796 /* Set Extended packet length bit (bit 14) on all chips that */
1797 /* support jumbo frames */
1798 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1799 /* Cannot do read-modify-write on 5401 */
1800 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1801 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1802 u32 phy_reg;
1803
1804 /* Set bit 14 with read-modify-write to preserve other bits */
1805 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1806 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1807 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1808 }
1809
1810 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1811 * jumbo frames transmission.
1812 */
1813 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1814 u32 phy_reg;
1815
1816 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1819 }
1820
1821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1822 /* adjust output voltage */
1823 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1824 }
1825
1826 tg3_phy_toggle_automdix(tp, 1);
1827 tg3_phy_set_wirespeed(tp);
1828 return 0;
1829}
1830
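/* Switch the GPIO-controlled auxiliary power setup. On dual-port
 * devices (5704/5714) the GPIOs are shared, so the peer device's WOL
 * and ASF state is consulted before changing anything.
 */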
1831static void tg3_frob_aux_power(struct tg3 *tp)
1832{
1833 struct tg3 *tp_peer = tp;
1834
1835 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1836 return;
1837
1838 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1839     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1840 struct net_device *dev_peer;
1841
1842 dev_peer = pci_get_drvdata(tp->pdev_peer);
1843 /* remove_one() may have been run on the peer. */
1844 if (!dev_peer)
1845 tp_peer = tp;
1846 else
1847 tp_peer = netdev_priv(dev_peer);
1848 }
1849
1850 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1851     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1852     (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1853     (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1855     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1856 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1857 (GRC_LCLCTRL_GPIO_OE0 |
1858 GRC_LCLCTRL_GPIO_OE1 |
1859 GRC_LCLCTRL_GPIO_OE2 |
1860 GRC_LCLCTRL_GPIO_OUTPUT0 |
1861 GRC_LCLCTRL_GPIO_OUTPUT1),
1862 100);
1863 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1864 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1865 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1866 GRC_LCLCTRL_GPIO_OE1 |
1867 GRC_LCLCTRL_GPIO_OE2 |
1868 GRC_LCLCTRL_GPIO_OUTPUT0 |
1869 GRC_LCLCTRL_GPIO_OUTPUT1 |
1870 tp->grc_local_ctrl;
1871 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1872
1873 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1874 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1875
1876 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1877 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 } else {
1879 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001880 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882 if (tp_peer != tp &&
1883 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1884 return;
1885
Michael Chandc56b7d2005-12-19 16:26:28 -08001886 /* Workaround to prevent overdrawing Amps. */
1887 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1888 ASIC_REV_5714) {
1889 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001890 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1891 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001892 }
1893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 /* On 5753 and variants, GPIO2 cannot be used. */
1895 no_gpio2 = tp->nic_sram_data_cfg &
1896 NIC_SRAM_DATA_CFG_NO_GPIO2;
1897
Michael Chandc56b7d2005-12-19 16:26:28 -08001898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 GRC_LCLCTRL_GPIO_OE1 |
1900 GRC_LCLCTRL_GPIO_OE2 |
1901 GRC_LCLCTRL_GPIO_OUTPUT1 |
1902 GRC_LCLCTRL_GPIO_OUTPUT2;
1903 if (no_gpio2) {
1904 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1905 GRC_LCLCTRL_GPIO_OUTPUT2);
1906 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001907 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1908 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
1910 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1911
Michael Chanb401e9e2005-12-19 16:27:04 -08001912 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1913 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915 if (!no_gpio2) {
1916 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001917 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1918 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 }
1920 }
1921 } else {
1922 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1923 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1924 if (tp_peer != tp &&
1925 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1926 return;
1927
Michael Chanb401e9e2005-12-19 16:27:04 -08001928 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1929 (GRC_LCLCTRL_GPIO_OE1 |
1930 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
Michael Chanb401e9e2005-12-19 16:27:04 -08001932 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1933 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
Michael Chanb401e9e2005-12-19 16:27:04 -08001935 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1936 (GRC_LCLCTRL_GPIO_OE1 |
1937 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 }
1939 }
1940}
1941
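/* Decide whether MAC_MODE_LINK_POLARITY should be set, based on the
 * LED mode, PHY type and link speed.
 */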
1942static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1943{
1944 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1945 return 1;
1946 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1947 if (speed != SPEED_10)
1948 return 1;
1949 } else if (speed == SPEED_10)
1950 return 1;
1951
1952 return 0;
1953}
1954
1955static int tg3_setup_phy(struct tg3 *, int);
1956
1957#define RESET_KIND_SHUTDOWN 0
1958#define RESET_KIND_INIT 1
1959#define RESET_KIND_SUSPEND 2
1960
1961static void tg3_write_sig_post_reset(struct tg3 *, int);
1962static int tg3_halt_cpu(struct tg3 *, u32);
1963static int tg3_nvram_lock(struct tg3 *);
1964static void tg3_nvram_unlock(struct tg3 *);
1965
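/* Power down the PHY when it is safe to do so. SerDes and 5906
 * devices get special handling, and chips with known PHY power-down
 * bugs are left alone.
 */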
1966static void tg3_power_down_phy(struct tg3 *tp)
1967{
1968 u32 val;
1969
1970 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1972 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1973 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1974
1975 sg_dig_ctrl |=
1976 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1977 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1978 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1979 }
1980 return;
1981 }
1982
1983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1984 tg3_bmcr_reset(tp);
1985 val = tr32(GRC_MISC_CFG);
1986 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1987 udelay(40);
1988 return;
1989 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1990 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1991 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1992 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1993 }
1994
1995 /* The PHY should not be powered down on some chips because
1996 * of bugs.
1997 */
1998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2000 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2001 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2002 return;
2003
2004 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2005     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2006 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2007 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2008 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2009 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2010 }
2011
2012 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2013}
2014
2015/* tp->lock is held. */
2016static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2017{
2018 u32 addr_high, addr_low;
2019 int i;
2020
2021 addr_high = ((tp->dev->dev_addr[0] << 8) |
2022 tp->dev->dev_addr[1]);
2023 addr_low = ((tp->dev->dev_addr[2] << 24) |
2024 (tp->dev->dev_addr[3] << 16) |
2025 (tp->dev->dev_addr[4] << 8) |
2026 (tp->dev->dev_addr[5] << 0));
2027 for (i = 0; i < 4; i++) {
2028 if (i == 1 && skip_mac_1)
2029 continue;
2030 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2031 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2032 }
2033
2034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2036 for (i = 0; i < 12; i++) {
2037 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2038 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2039 }
2040 }
2041
2042 addr_high = (tp->dev->dev_addr[0] +
2043 tp->dev->dev_addr[1] +
2044 tp->dev->dev_addr[2] +
2045 tp->dev->dev_addr[3] +
2046 tp->dev->dev_addr[4] +
2047 tp->dev->dev_addr[5]) &
2048 TX_BACKOFF_SEED_MASK;
2049 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2050}
2051
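/* Move the device into the requested PCI power state: record the
 * current link settings, reprogram the PHY/MAC for Wake-on-LAN if it
 * is enabled, adjust the core clocks and GPIOs, and finally ask the
 * PCI layer to change the power state.
 */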
2052static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2053{
2054 u32 misc_host_ctrl;
2055
2056 /* Make sure register accesses (indirect or otherwise)
2057 * will function correctly.
2058 */
2059 pci_write_config_dword(tp->pdev,
2060 TG3PCI_MISC_HOST_CTRL,
2061 tp->misc_host_ctrl);
2062
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002064 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002065 pci_enable_wake(tp->pdev, state, false);
2066 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002067
Michael Chan9d26e212006-12-07 00:21:14 -08002068 /* Switch out of Vaux if it is a NIC */
2069 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002070 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
2072 return 0;
2073
Michael Chanbc1c7562006-03-20 17:48:03 -08002074 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002075 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002076 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 break;
2078
2079 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002080 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2081 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002083 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2085 tw32(TG3PCI_MISC_HOST_CTRL,
2086 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2087
Matt Carlsondd477002008-05-25 23:45:58 -07002088 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002089 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2090 !tp->link_config.phy_is_low_power) {
2091 struct phy_device *phydev;
2092 u32 advertising;
2093
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002094 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002095
2096 tp->link_config.phy_is_low_power = 1;
2097
2098 tp->link_config.orig_speed = phydev->speed;
2099 tp->link_config.orig_duplex = phydev->duplex;
2100 tp->link_config.orig_autoneg = phydev->autoneg;
2101 tp->link_config.orig_advertising = phydev->advertising;
2102
2103 advertising = ADVERTISED_TP |
2104 ADVERTISED_Pause |
2105 ADVERTISED_Autoneg |
2106 ADVERTISED_10baseT_Half;
2107
2108 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2109 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2110 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2111 advertising |=
2112 ADVERTISED_100baseT_Half |
2113 ADVERTISED_100baseT_Full |
2114 ADVERTISED_10baseT_Full;
2115 else
2116 advertising |= ADVERTISED_10baseT_Full;
2117 }
2118
2119 phydev->advertising = advertising;
2120
2121 phy_start_aneg(phydev);
2122 }
Matt Carlsondd477002008-05-25 23:45:58 -07002123 } else {
2124 if (tp->link_config.phy_is_low_power == 0) {
2125 tp->link_config.phy_is_low_power = 1;
2126 tp->link_config.orig_speed = tp->link_config.speed;
2127 tp->link_config.orig_duplex = tp->link_config.duplex;
2128 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2129 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Matt Carlsondd477002008-05-25 23:45:58 -07002131 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2132 tp->link_config.speed = SPEED_10;
2133 tp->link_config.duplex = DUPLEX_HALF;
2134 tp->link_config.autoneg = AUTONEG_ENABLE;
2135 tg3_setup_phy(tp, 0);
2136 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 }
2138
Matt Carlson3f007892008-11-03 16:51:36 -08002139 __tg3_set_mac_addr(tp, 0);
2140
Michael Chanb5d37722006-09-27 16:06:21 -07002141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2142 u32 val;
2143
2144 val = tr32(GRC_VCPU_EXT_CTRL);
2145 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2146 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002147 int i;
2148 u32 val;
2149
2150 for (i = 0; i < 200; i++) {
2151 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2152 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2153 break;
2154 msleep(1);
2155 }
2156 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002157 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2158 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2159 WOL_DRV_STATE_SHUTDOWN |
2160 WOL_DRV_WOL |
2161 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2164 u32 mac_mode;
2165
2166 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlsondd477002008-05-25 23:45:58 -07002167 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2168 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2169 udelay(40);
2170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Michael Chan3f7045c2006-09-27 16:02:29 -07002172 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2173 mac_mode = MAC_MODE_PORT_MODE_GMII;
2174 else
2175 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002177 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2178 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2179 ASIC_REV_5700) {
2180 u32 speed = (tp->tg3_flags &
2181 TG3_FLAG_WOL_SPEED_100MB) ?
2182 SPEED_100 : SPEED_10;
2183 if (tg3_5700_link_polarity(tp, speed))
2184 mac_mode |= MAC_MODE_LINK_POLARITY;
2185 else
2186 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2187 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 } else {
2189 mac_mode = MAC_MODE_PORT_MODE_TBI;
2190 }
2191
John W. Linvillecbf46852005-04-21 17:01:29 -07002192 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 tw32(MAC_LED_CTRL, tp->led_ctrl);
2194
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002195 if (pci_pme_capable(tp->pdev, state) &&
Matt Carlsonb2aee152008-11-03 16:51:11 -08002196 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
Matt Carlsonb2aee152008-11-03 16:51:11 -08002198 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2199 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2200 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2201 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2202 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
Matt Carlson3bda1252008-08-15 14:08:22 -07002205 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2206 mac_mode |= tp->mac_mode &
2207 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2208 if (mac_mode & MAC_MODE_APE_TX_EN)
2209 mac_mode |= MAC_MODE_TDE_ENABLE;
2210 }
2211
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 tw32_f(MAC_MODE, mac_mode);
2213 udelay(100);
2214
2215 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2216 udelay(10);
2217 }
2218
2219 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2220 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2222 u32 base_val;
2223
2224 base_val = tp->pci_clock_ctrl;
2225 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2226 CLOCK_CTRL_TXCLK_DISABLE);
2227
Michael Chanb401e9e2005-12-19 16:27:04 -08002228 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2229 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002230 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002231 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002232 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002233 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002234 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2236 u32 newbits1, newbits2;
2237
2238 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2239 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2240 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2241 CLOCK_CTRL_TXCLK_DISABLE |
2242 CLOCK_CTRL_ALTCLK);
2243 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2244 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2245 newbits1 = CLOCK_CTRL_625_CORE;
2246 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2247 } else {
2248 newbits1 = CLOCK_CTRL_ALTCLK;
2249 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2250 }
2251
Michael Chanb401e9e2005-12-19 16:27:04 -08002252 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2253 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254
Michael Chanb401e9e2005-12-19 16:27:04 -08002255 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2256 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
2258 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2259 u32 newbits3;
2260
2261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2262 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2263 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2264 CLOCK_CTRL_TXCLK_DISABLE |
2265 CLOCK_CTRL_44MHZ_CORE);
2266 } else {
2267 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2268 }
2269
Michael Chanb401e9e2005-12-19 16:27:04 -08002270 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2271 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 }
2273 }
2274
Michael Chan6921d202005-12-13 21:15:53 -08002275 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002276 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2277 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Michael Chan3f7045c2006-09-27 16:02:29 -07002278 tg3_power_down_phy(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002279
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 tg3_frob_aux_power(tp);
2281
2282 /* Workaround for unstable PLL clock */
2283 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2284 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2285 u32 val = tr32(0x7d00);
2286
2287 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2288 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002289 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002290 int err;
2291
2292 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002294 if (!err)
2295 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002296 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 }
2298
Michael Chanbbadf502006-04-06 21:46:34 -07002299 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2300
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002301 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2302 pci_enable_wake(tp->pdev, state, true);
2303
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002305 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 return 0;
2308}
2309
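/* Decode the MII_TG3_AUX_STAT speed/duplex field into SPEED_* and
 * DUPLEX_* values; the 5906 uses a reduced encoding.
 */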
2310static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2311{
2312 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2313 case MII_TG3_AUX_STAT_10HALF:
2314 *speed = SPEED_10;
2315 *duplex = DUPLEX_HALF;
2316 break;
2317
2318 case MII_TG3_AUX_STAT_10FULL:
2319 *speed = SPEED_10;
2320 *duplex = DUPLEX_FULL;
2321 break;
2322
2323 case MII_TG3_AUX_STAT_100HALF:
2324 *speed = SPEED_100;
2325 *duplex = DUPLEX_HALF;
2326 break;
2327
2328 case MII_TG3_AUX_STAT_100FULL:
2329 *speed = SPEED_100;
2330 *duplex = DUPLEX_FULL;
2331 break;
2332
2333 case MII_TG3_AUX_STAT_1000HALF:
2334 *speed = SPEED_1000;
2335 *duplex = DUPLEX_HALF;
2336 break;
2337
2338 case MII_TG3_AUX_STAT_1000FULL:
2339 *speed = SPEED_1000;
2340 *duplex = DUPLEX_FULL;
2341 break;
2342
2343 default:
Michael Chan715116a2006-09-27 16:09:25 -07002344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2345 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2346 SPEED_10;
2347 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2348 DUPLEX_HALF;
2349 break;
2350 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 *speed = SPEED_INVALID;
2352 *duplex = DUPLEX_INVALID;
2353 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002354 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355}
2356
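/* Program the advertisement registers and start autonegotiation, or
 * force the configured speed/duplex when autoneg is disabled. In low
 * power mode only 10/100 rates are advertised.
 */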
2357static void tg3_phy_copper_begin(struct tg3 *tp)
2358{
2359 u32 new_adv;
2360 int i;
2361
2362 if (tp->link_config.phy_is_low_power) {
2363 /* Entering low power mode. Disable gigabit and
2364 * 100baseT advertisements.
2365 */
2366 tg3_writephy(tp, MII_TG3_CTRL, 0);
2367
2368 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2369 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2370 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2371 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2372
2373 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2374 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2376 tp->link_config.advertising &=
2377 ~(ADVERTISED_1000baseT_Half |
2378 ADVERTISED_1000baseT_Full);
2379
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002380 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2382 new_adv |= ADVERTISE_10HALF;
2383 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2384 new_adv |= ADVERTISE_10FULL;
2385 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2386 new_adv |= ADVERTISE_100HALF;
2387 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2388 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002389
2390 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2391
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2393
2394 if (tp->link_config.advertising &
2395 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2396 new_adv = 0;
2397 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2398 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2399 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2400 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2401 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2402 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2403 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2404 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2405 MII_TG3_CTRL_ENABLE_AS_MASTER);
2406 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2407 } else {
2408 tg3_writephy(tp, MII_TG3_CTRL, 0);
2409 }
2410 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002411 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2412 new_adv |= ADVERTISE_CSMA;
2413
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 /* Asking for a specific link mode. */
2415 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2417
2418 if (tp->link_config.duplex == DUPLEX_FULL)
2419 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2420 else
2421 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2422 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2423 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2424 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2425 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 if (tp->link_config.speed == SPEED_100) {
2428 if (tp->link_config.duplex == DUPLEX_FULL)
2429 new_adv |= ADVERTISE_100FULL;
2430 else
2431 new_adv |= ADVERTISE_100HALF;
2432 } else {
2433 if (tp->link_config.duplex == DUPLEX_FULL)
2434 new_adv |= ADVERTISE_10FULL;
2435 else
2436 new_adv |= ADVERTISE_10HALF;
2437 }
2438 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002439
2440 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002442
2443 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 }
2445
2446 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2447 tp->link_config.speed != SPEED_INVALID) {
2448 u32 bmcr, orig_bmcr;
2449
2450 tp->link_config.active_speed = tp->link_config.speed;
2451 tp->link_config.active_duplex = tp->link_config.duplex;
2452
2453 bmcr = 0;
2454 switch (tp->link_config.speed) {
2455 default:
2456 case SPEED_10:
2457 break;
2458
2459 case SPEED_100:
2460 bmcr |= BMCR_SPEED100;
2461 break;
2462
2463 case SPEED_1000:
2464 bmcr |= TG3_BMCR_SPEED1000;
2465 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002466 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467
2468 if (tp->link_config.duplex == DUPLEX_FULL)
2469 bmcr |= BMCR_FULLDPLX;
2470
2471 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2472 (bmcr != orig_bmcr)) {
2473 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2474 for (i = 0; i < 1500; i++) {
2475 u32 tmp;
2476
2477 udelay(10);
2478 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2479 tg3_readphy(tp, MII_BMSR, &tmp))
2480 continue;
2481 if (!(tmp & BMSR_LSTATUS)) {
2482 udelay(40);
2483 break;
2484 }
2485 }
2486 tg3_writephy(tp, MII_BMCR, bmcr);
2487 udelay(40);
2488 }
2489 } else {
2490 tg3_writephy(tp, MII_BMCR,
2491 BMCR_ANENABLE | BMCR_ANRESTART);
2492 }
2493}
2494
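/* DSP fixups for the BCM5401 PHY: turn off tap power management and
 * set the extended packet length bit.
 */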
2495static int tg3_init_5401phy_dsp(struct tg3 *tp)
2496{
2497 int err;
2498
2499 /* Turn off tap power management. */
2500 /* Set Extended packet length bit */
2501 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2502
2503 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2504 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2505
2506 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2507 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2508
2509 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2510 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2511
2512 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2513 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2514
2515 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2516 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2517
2518 udelay(40);
2519
2520 return err;
2521}
2522
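/* Return 1 if the PHY advertisement registers already cover every
 * mode requested in @mask, 0 otherwise.
 */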
2523static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2524{
2525 u32 adv_reg, all_mask = 0;
2526
2527 if (mask & ADVERTISED_10baseT_Half)
2528 all_mask |= ADVERTISE_10HALF;
2529 if (mask & ADVERTISED_10baseT_Full)
2530 all_mask |= ADVERTISE_10FULL;
2531 if (mask & ADVERTISED_100baseT_Half)
2532 all_mask |= ADVERTISE_100HALF;
2533 if (mask & ADVERTISED_100baseT_Full)
2534 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535
2536 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2537 return 0;
2538
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 if ((adv_reg & all_mask) != all_mask)
2540 return 0;
2541 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2542 u32 tg3_ctrl;
2543
Michael Chan3600d912006-12-07 00:21:48 -08002544 all_mask = 0;
2545 if (mask & ADVERTISED_1000baseT_Half)
2546 all_mask |= ADVERTISE_1000HALF;
2547 if (mask & ADVERTISED_1000baseT_Full)
2548 all_mask |= ADVERTISE_1000FULL;
2549
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2551 return 0;
2552
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 if ((tg3_ctrl & all_mask) != all_mask)
2554 return 0;
2555 }
2556 return 1;
2557}
2558
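/* Check that the advertised flow-control bits match the requested
 * configuration. On a full-duplex link a mismatch fails the check;
 * otherwise the advertisement register is reprogrammed so the next
 * renegotiation picks up the right values.
 */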
2559static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2560{
2561 u32 curadv, reqadv;
2562
2563 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2564 return 1;
2565
2566 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2567 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2568
2569 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2570 if (curadv != reqadv)
2571 return 0;
2572
2573 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2574 tg3_readphy(tp, MII_LPA, rmtadv);
2575 } else {
2576 /* Reprogram the advertisement register, even if it
2577 * does not affect the current link. If the link
2578 * gets renegotiated in the future, we can save an
2579 * additional renegotiation cycle by advertising
2580 * it correctly in the first place.
2581 */
2582 if (curadv != reqadv) {
2583 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2584 ADVERTISE_PAUSE_ASYM);
2585 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2586 }
2587 }
2588
2589 return 1;
2590}
2591
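/* Main link setup path for copper PHYs: clear stale status, apply PHY
 * workarounds, wait for link, derive speed/duplex and flow control,
 * and program the MAC mode to match.
 */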
2592static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2593{
2594 int current_link_up;
2595 u32 bmsr, dummy;
2596 u32 lcl_adv, rmt_adv;
2597 u16 current_speed;
2598 u8 current_duplex;
2599 int i, err;
2600
2601 tw32(MAC_EVENT, 0);
2602
2603 tw32_f(MAC_STATUS,
2604 (MAC_STATUS_SYNC_CHANGED |
2605 MAC_STATUS_CFG_CHANGED |
2606 MAC_STATUS_MI_COMPLETION |
2607 MAC_STATUS_LNKSTATE_CHANGED));
2608 udelay(40);
2609
Matt Carlson8ef21422008-05-02 16:47:53 -07002610 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2611 tw32_f(MAC_MI_MODE,
2612 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2613 udelay(80);
2614 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615
2616 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2617
2618 /* Some third-party PHYs need to be reset on link going
2619 * down.
2620 */
2621 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2624 netif_carrier_ok(tp->dev)) {
2625 tg3_readphy(tp, MII_BMSR, &bmsr);
2626 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2627 !(bmsr & BMSR_LSTATUS))
2628 force_reset = 1;
2629 }
2630 if (force_reset)
2631 tg3_phy_reset(tp);
2632
2633 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2634 tg3_readphy(tp, MII_BMSR, &bmsr);
2635 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2636 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2637 bmsr = 0;
2638
2639 if (!(bmsr & BMSR_LSTATUS)) {
2640 err = tg3_init_5401phy_dsp(tp);
2641 if (err)
2642 return err;
2643
2644 tg3_readphy(tp, MII_BMSR, &bmsr);
2645 for (i = 0; i < 1000; i++) {
2646 udelay(10);
2647 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2648 (bmsr & BMSR_LSTATUS)) {
2649 udelay(40);
2650 break;
2651 }
2652 }
2653
2654 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2655 !(bmsr & BMSR_LSTATUS) &&
2656 tp->link_config.active_speed == SPEED_1000) {
2657 err = tg3_phy_reset(tp);
2658 if (!err)
2659 err = tg3_init_5401phy_dsp(tp);
2660 if (err)
2661 return err;
2662 }
2663 }
2664 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2665 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2666 /* 5701 {A0,B0} CRC bug workaround */
2667 tg3_writephy(tp, 0x15, 0x0a75);
2668 tg3_writephy(tp, 0x1c, 0x8c68);
2669 tg3_writephy(tp, 0x1c, 0x8d68);
2670 tg3_writephy(tp, 0x1c, 0x8c68);
2671 }
2672
2673 /* Clear pending interrupts... */
2674 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2675 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2676
2677 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2678 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002679 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2681
2682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2684 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2685 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2686 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2687 else
2688 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2689 }
2690
2691 current_link_up = 0;
2692 current_speed = SPEED_INVALID;
2693 current_duplex = DUPLEX_INVALID;
2694
2695 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2696 u32 val;
2697
2698 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2699 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2700 if (!(val & (1 << 10))) {
2701 val |= (1 << 10);
2702 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2703 goto relink;
2704 }
2705 }
2706
2707 bmsr = 0;
2708 for (i = 0; i < 100; i++) {
2709 tg3_readphy(tp, MII_BMSR, &bmsr);
2710 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2711 (bmsr & BMSR_LSTATUS))
2712 break;
2713 udelay(40);
2714 }
2715
2716 if (bmsr & BMSR_LSTATUS) {
2717 u32 aux_stat, bmcr;
2718
2719 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2720 for (i = 0; i < 2000; i++) {
2721 udelay(10);
2722 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2723 aux_stat)
2724 break;
2725 }
2726
2727 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2728 &current_speed,
2729 &current_duplex);
2730
2731 bmcr = 0;
2732 for (i = 0; i < 200; i++) {
2733 tg3_readphy(tp, MII_BMCR, &bmcr);
2734 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2735 continue;
2736 if (bmcr && bmcr != 0x7fff)
2737 break;
2738 udelay(10);
2739 }
2740
Matt Carlsonef167e22007-12-20 20:10:01 -08002741 lcl_adv = 0;
2742 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
Matt Carlsonef167e22007-12-20 20:10:01 -08002744 tp->link_config.active_speed = current_speed;
2745 tp->link_config.active_duplex = current_duplex;
2746
2747 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2748 if ((bmcr & BMCR_ANENABLE) &&
2749 tg3_copper_is_advertising_all(tp,
2750 tp->link_config.advertising)) {
2751 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2752 &rmt_adv))
2753 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 }
2755 } else {
2756 if (!(bmcr & BMCR_ANENABLE) &&
2757 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002758 tp->link_config.duplex == current_duplex &&
2759 tp->link_config.flowctrl ==
2760 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 }
2763 }
2764
Matt Carlsonef167e22007-12-20 20:10:01 -08002765 if (current_link_up == 1 &&
2766 tp->link_config.active_duplex == DUPLEX_FULL)
2767 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 }
2769
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770relink:
Michael Chan6921d202005-12-13 21:15:53 -08002771 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772 u32 tmp;
2773
2774 tg3_phy_copper_begin(tp);
2775
2776 tg3_readphy(tp, MII_BMSR, &tmp);
2777 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2778 (tmp & BMSR_LSTATUS))
2779 current_link_up = 1;
2780 }
2781
2782 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2783 if (current_link_up == 1) {
2784 if (tp->link_config.active_speed == SPEED_100 ||
2785 tp->link_config.active_speed == SPEED_10)
2786 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2787 else
2788 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2789 } else
2790 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2791
2792 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2793 if (tp->link_config.active_duplex == DUPLEX_HALF)
2794 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2795
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002797 if (current_link_up == 1 &&
2798 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002800 else
2801 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 }
2803
2804 /* ??? Without this setting Netgear GA302T PHY does not
2805 * ??? send/receive packets...
2806 */
2807 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2808 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2809 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2810 tw32_f(MAC_MI_MODE, tp->mi_mode);
2811 udelay(80);
2812 }
2813
2814 tw32_f(MAC_MODE, tp->mac_mode);
2815 udelay(40);
2816
2817 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2818 /* Polled via timer. */
2819 tw32_f(MAC_EVENT, 0);
2820 } else {
2821 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2822 }
2823 udelay(40);
2824
2825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2826 current_link_up == 1 &&
2827 tp->link_config.active_speed == SPEED_1000 &&
2828 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2829 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2830 udelay(120);
2831 tw32_f(MAC_STATUS,
2832 (MAC_STATUS_SYNC_CHANGED |
2833 MAC_STATUS_CFG_CHANGED));
2834 udelay(40);
2835 tg3_write_mem(tp,
2836 NIC_SRAM_FIRMWARE_MBOX,
2837 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2838 }
2839
2840 if (current_link_up != netif_carrier_ok(tp->dev)) {
2841 if (current_link_up)
2842 netif_carrier_on(tp->dev);
2843 else
2844 netif_carrier_off(tp->dev);
2845 tg3_link_report(tp);
2846 }
2847
2848 return 0;
2849}
2850
2851struct tg3_fiber_aneginfo {
2852 int state;
2853#define ANEG_STATE_UNKNOWN 0
2854#define ANEG_STATE_AN_ENABLE 1
2855#define ANEG_STATE_RESTART_INIT 2
2856#define ANEG_STATE_RESTART 3
2857#define ANEG_STATE_DISABLE_LINK_OK 4
2858#define ANEG_STATE_ABILITY_DETECT_INIT 5
2859#define ANEG_STATE_ABILITY_DETECT 6
2860#define ANEG_STATE_ACK_DETECT_INIT 7
2861#define ANEG_STATE_ACK_DETECT 8
2862#define ANEG_STATE_COMPLETE_ACK_INIT 9
2863#define ANEG_STATE_COMPLETE_ACK 10
2864#define ANEG_STATE_IDLE_DETECT_INIT 11
2865#define ANEG_STATE_IDLE_DETECT 12
2866#define ANEG_STATE_LINK_OK 13
2867#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2868#define ANEG_STATE_NEXT_PAGE_WAIT 15
2869
2870 u32 flags;
2871#define MR_AN_ENABLE 0x00000001
2872#define MR_RESTART_AN 0x00000002
2873#define MR_AN_COMPLETE 0x00000004
2874#define MR_PAGE_RX 0x00000008
2875#define MR_NP_LOADED 0x00000010
2876#define MR_TOGGLE_TX 0x00000020
2877#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2878#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2879#define MR_LP_ADV_SYM_PAUSE 0x00000100
2880#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2881#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2882#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2883#define MR_LP_ADV_NEXT_PAGE 0x00001000
2884#define MR_TOGGLE_RX 0x00002000
2885#define MR_NP_RX 0x00004000
2886
2887#define MR_LINK_OK 0x80000000
2888
2889 unsigned long link_time, cur_time;
2890
2891 u32 ability_match_cfg;
2892 int ability_match_count;
2893
2894 char ability_match, idle_match, ack_match;
2895
2896 u32 txconfig, rxconfig;
2897#define ANEG_CFG_NP 0x00000080
2898#define ANEG_CFG_ACK 0x00000040
2899#define ANEG_CFG_RF2 0x00000020
2900#define ANEG_CFG_RF1 0x00000010
2901#define ANEG_CFG_PS2 0x00000001
2902#define ANEG_CFG_PS1 0x00008000
2903#define ANEG_CFG_HD 0x00004000
2904#define ANEG_CFG_FD 0x00002000
2905#define ANEG_CFG_INVAL 0x00001f06
2906
2907};
2908#define ANEG_OK 0
2909#define ANEG_DONE 1
2910#define ANEG_TIMER_ENAB 2
2911#define ANEG_FAILED -1
2912
2913#define ANEG_STATE_SETTLE_TIME 10000
2914
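/* One step of the software 1000BASE-X autonegotiation arbitration
 * state machine. Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or
 * ANEG_FAILED.
 */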
2915static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2916 struct tg3_fiber_aneginfo *ap)
2917{
2918 u16 flowctrl;
2919 unsigned long delta;
2920 u32 rx_cfg_reg;
2921 int ret;
2922
2923 if (ap->state == ANEG_STATE_UNKNOWN) {
2924 ap->rxconfig = 0;
2925 ap->link_time = 0;
2926 ap->cur_time = 0;
2927 ap->ability_match_cfg = 0;
2928 ap->ability_match_count = 0;
2929 ap->ability_match = 0;
2930 ap->idle_match = 0;
2931 ap->ack_match = 0;
2932 }
2933 ap->cur_time++;
2934
2935 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2936 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2937
2938 if (rx_cfg_reg != ap->ability_match_cfg) {
2939 ap->ability_match_cfg = rx_cfg_reg;
2940 ap->ability_match = 0;
2941 ap->ability_match_count = 0;
2942 } else {
2943 if (++ap->ability_match_count > 1) {
2944 ap->ability_match = 1;
2945 ap->ability_match_cfg = rx_cfg_reg;
2946 }
2947 }
2948 if (rx_cfg_reg & ANEG_CFG_ACK)
2949 ap->ack_match = 1;
2950 else
2951 ap->ack_match = 0;
2952
2953 ap->idle_match = 0;
2954 } else {
2955 ap->idle_match = 1;
2956 ap->ability_match_cfg = 0;
2957 ap->ability_match_count = 0;
2958 ap->ability_match = 0;
2959 ap->ack_match = 0;
2960
2961 rx_cfg_reg = 0;
2962 }
2963
2964 ap->rxconfig = rx_cfg_reg;
2965 ret = ANEG_OK;
2966
2967 switch(ap->state) {
2968 case ANEG_STATE_UNKNOWN:
2969 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2970 ap->state = ANEG_STATE_AN_ENABLE;
2971
2972 /* fallthru */
2973 case ANEG_STATE_AN_ENABLE:
2974 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2975 if (ap->flags & MR_AN_ENABLE) {
2976 ap->link_time = 0;
2977 ap->cur_time = 0;
2978 ap->ability_match_cfg = 0;
2979 ap->ability_match_count = 0;
2980 ap->ability_match = 0;
2981 ap->idle_match = 0;
2982 ap->ack_match = 0;
2983
2984 ap->state = ANEG_STATE_RESTART_INIT;
2985 } else {
2986 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2987 }
2988 break;
2989
2990 case ANEG_STATE_RESTART_INIT:
2991 ap->link_time = ap->cur_time;
2992 ap->flags &= ~(MR_NP_LOADED);
2993 ap->txconfig = 0;
2994 tw32(MAC_TX_AUTO_NEG, 0);
2995 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2996 tw32_f(MAC_MODE, tp->mac_mode);
2997 udelay(40);
2998
2999 ret = ANEG_TIMER_ENAB;
3000 ap->state = ANEG_STATE_RESTART;
3001
3002 /* fallthru */
3003 case ANEG_STATE_RESTART:
3004 delta = ap->cur_time - ap->link_time;
3005 if (delta > ANEG_STATE_SETTLE_TIME) {
3006 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3007 } else {
3008 ret = ANEG_TIMER_ENAB;
3009 }
3010 break;
3011
3012 case ANEG_STATE_DISABLE_LINK_OK:
3013 ret = ANEG_DONE;
3014 break;
3015
3016 case ANEG_STATE_ABILITY_DETECT_INIT:
3017 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003018 ap->txconfig = ANEG_CFG_FD;
3019 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3020 if (flowctrl & ADVERTISE_1000XPAUSE)
3021 ap->txconfig |= ANEG_CFG_PS1;
3022 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3023 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3025 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3026 tw32_f(MAC_MODE, tp->mac_mode);
3027 udelay(40);
3028
3029 ap->state = ANEG_STATE_ABILITY_DETECT;
3030 break;
3031
3032 case ANEG_STATE_ABILITY_DETECT:
3033 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3034 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3035 }
3036 break;
3037
3038 case ANEG_STATE_ACK_DETECT_INIT:
3039 ap->txconfig |= ANEG_CFG_ACK;
3040 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3041 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3042 tw32_f(MAC_MODE, tp->mac_mode);
3043 udelay(40);
3044
3045 ap->state = ANEG_STATE_ACK_DETECT;
3046
3047 /* fallthru */
3048 case ANEG_STATE_ACK_DETECT:
3049 if (ap->ack_match != 0) {
3050 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3051 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3052 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3053 } else {
3054 ap->state = ANEG_STATE_AN_ENABLE;
3055 }
3056 } else if (ap->ability_match != 0 &&
3057 ap->rxconfig == 0) {
3058 ap->state = ANEG_STATE_AN_ENABLE;
3059 }
3060 break;
3061
3062 case ANEG_STATE_COMPLETE_ACK_INIT:
3063 if (ap->rxconfig & ANEG_CFG_INVAL) {
3064 ret = ANEG_FAILED;
3065 break;
3066 }
3067 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3068 MR_LP_ADV_HALF_DUPLEX |
3069 MR_LP_ADV_SYM_PAUSE |
3070 MR_LP_ADV_ASYM_PAUSE |
3071 MR_LP_ADV_REMOTE_FAULT1 |
3072 MR_LP_ADV_REMOTE_FAULT2 |
3073 MR_LP_ADV_NEXT_PAGE |
3074 MR_TOGGLE_RX |
3075 MR_NP_RX);
3076 if (ap->rxconfig & ANEG_CFG_FD)
3077 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3078 if (ap->rxconfig & ANEG_CFG_HD)
3079 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3080 if (ap->rxconfig & ANEG_CFG_PS1)
3081 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3082 if (ap->rxconfig & ANEG_CFG_PS2)
3083 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3084 if (ap->rxconfig & ANEG_CFG_RF1)
3085 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3086 if (ap->rxconfig & ANEG_CFG_RF2)
3087 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3088 if (ap->rxconfig & ANEG_CFG_NP)
3089 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3090
3091 ap->link_time = ap->cur_time;
3092
3093 ap->flags ^= (MR_TOGGLE_TX);
3094 if (ap->rxconfig & 0x0008)
3095 ap->flags |= MR_TOGGLE_RX;
3096 if (ap->rxconfig & ANEG_CFG_NP)
3097 ap->flags |= MR_NP_RX;
3098 ap->flags |= MR_PAGE_RX;
3099
3100 ap->state = ANEG_STATE_COMPLETE_ACK;
3101 ret = ANEG_TIMER_ENAB;
3102 break;
3103
3104 case ANEG_STATE_COMPLETE_ACK:
3105 if (ap->ability_match != 0 &&
3106 ap->rxconfig == 0) {
3107 ap->state = ANEG_STATE_AN_ENABLE;
3108 break;
3109 }
3110 delta = ap->cur_time - ap->link_time;
3111 if (delta > ANEG_STATE_SETTLE_TIME) {
3112 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3113 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3114 } else {
3115 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3116 !(ap->flags & MR_NP_RX)) {
3117 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3118 } else {
3119 ret = ANEG_FAILED;
3120 }
3121 }
3122 }
3123 break;
3124
3125 case ANEG_STATE_IDLE_DETECT_INIT:
3126 ap->link_time = ap->cur_time;
3127 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3128 tw32_f(MAC_MODE, tp->mac_mode);
3129 udelay(40);
3130
3131 ap->state = ANEG_STATE_IDLE_DETECT;
3132 ret = ANEG_TIMER_ENAB;
3133 break;
3134
3135 case ANEG_STATE_IDLE_DETECT:
3136 if (ap->ability_match != 0 &&
3137 ap->rxconfig == 0) {
3138 ap->state = ANEG_STATE_AN_ENABLE;
3139 break;
3140 }
3141 delta = ap->cur_time - ap->link_time;
3142 if (delta > ANEG_STATE_SETTLE_TIME) {
3143 /* XXX another gem from the Broadcom driver :( */
3144 ap->state = ANEG_STATE_LINK_OK;
3145 }
3146 break;
3147
3148 case ANEG_STATE_LINK_OK:
3149 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3150 ret = ANEG_DONE;
3151 break;
3152
3153 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3154 /* ??? unimplemented */
3155 break;
3156
3157 case ANEG_STATE_NEXT_PAGE_WAIT:
3158 /* ??? unimplemented */
3159 break;
3160
3161 default:
3162 ret = ANEG_FAILED;
3163 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
3166 return ret;
3167}
3168
Matt Carlson5be73b42007-12-20 20:09:29 -08003169static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170{
3171 int res = 0;
3172 struct tg3_fiber_aneginfo aninfo;
3173 int status = ANEG_FAILED;
3174 unsigned int tick;
3175 u32 tmp;
3176
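/* Clear the transmit config word, force the port into GMII mode,
 * then start sending autoneg config words.
 */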
3177 tw32_f(MAC_TX_AUTO_NEG, 0);
3178
3179 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3180 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3181 udelay(40);
3182
3183 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3184 udelay(40);
3185
3186 memset(&aninfo, 0, sizeof(aninfo));
3187 aninfo.flags |= MR_AN_ENABLE;
3188 aninfo.state = ANEG_STATE_UNKNOWN;
3189 aninfo.cur_time = 0;
3190 tick = 0;
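/* Crank the software autoneg state machine for up to roughly 195 ms
 * (one udelay(1) per pass), or until it reports done or failed.
 */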
3191 while (++tick < 195000) {
3192 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3193 if (status == ANEG_DONE || status == ANEG_FAILED)
3194 break;
3195
3196 udelay(1);
3197 }
3198
3199 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3200 tw32_f(MAC_MODE, tp->mac_mode);
3201 udelay(40);
3202
Matt Carlson5be73b42007-12-20 20:09:29 -08003203 *txflags = aninfo.txconfig;
3204 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205
3206 if (status == ANEG_DONE &&
3207 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3208 MR_LP_ADV_FULL_DUPLEX)))
3209 res = 1;
3210
3211 return res;
3212}
3213
3214static void tg3_init_bcm8002(struct tg3 *tp)
3215{
3216 u32 mac_status = tr32(MAC_STATUS);
3217 int i;
3218
3219	/* Reset when initializing for the first time or when we have a link. */
3220 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3221 !(mac_status & MAC_STATUS_PCS_SYNCED))
3222 return;
3223
3224 /* Set PLL lock range. */
3225 tg3_writephy(tp, 0x16, 0x8007);
3226
3227 /* SW reset */
3228 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3229
3230 /* Wait for reset to complete. */
3231 /* XXX schedule_timeout() ... */
3232 for (i = 0; i < 500; i++)
3233 udelay(10);
3234
3235 /* Config mode; select PMA/Ch 1 regs. */
3236 tg3_writephy(tp, 0x10, 0x8411);
3237
3238 /* Enable auto-lock and comdet, select txclk for tx. */
3239 tg3_writephy(tp, 0x11, 0x0a10);
3240
3241 tg3_writephy(tp, 0x18, 0x00a0);
3242 tg3_writephy(tp, 0x16, 0x41ff);
3243
3244 /* Assert and deassert POR. */
3245 tg3_writephy(tp, 0x13, 0x0400);
3246 udelay(40);
3247 tg3_writephy(tp, 0x13, 0x0000);
3248
3249 tg3_writephy(tp, 0x11, 0x0a50);
3250 udelay(40);
3251 tg3_writephy(tp, 0x11, 0x0a10);
3252
3253 /* Wait for signal to stabilize */
3254 /* XXX schedule_timeout() ... */
3255 for (i = 0; i < 15000; i++)
3256 udelay(10);
3257
3258 /* Deselect the channel register so we can read the PHYID
3259 * later.
3260 */
3261 tg3_writephy(tp, 0x10, 0x8011);
3262}
3263
3264static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3265{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003266 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 u32 sg_dig_ctrl, sg_dig_status;
3268 u32 serdes_cfg, expected_sg_dig_ctrl;
3269 int workaround, port_a;
3270 int current_link_up;
3271
3272 serdes_cfg = 0;
3273 expected_sg_dig_ctrl = 0;
3274 workaround = 0;
3275 port_a = 1;
3276 current_link_up = 0;
3277
3278 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3279 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3280 workaround = 1;
3281 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3282 port_a = 0;
3283
3284 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3285 /* preserve bits 20-23 for voltage regulator */
3286 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3287 }
3288
3289 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3290
3291 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003292 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293 if (workaround) {
3294 u32 val = serdes_cfg;
3295
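/* Magic SERDES_CFG values carried over from the vendor driver; the
 * two constants below differ only in bit 27, apparently selecting
 * the port A vs. port B variant.
 */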
3296 if (port_a)
3297 val |= 0xc010000;
3298 else
3299 val |= 0x4010000;
3300 tw32_f(MAC_SERDES_CFG, val);
3301 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003302
3303 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 }
3305 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3306 tg3_setup_flow_control(tp, 0, 0);
3307 current_link_up = 1;
3308 }
3309 goto out;
3310 }
3311
3312 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003313 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314
Matt Carlson82cd3d12007-12-20 20:09:00 -08003315 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3316 if (flowctrl & ADVERTISE_1000XPAUSE)
3317 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3318 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3319 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320
3321 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003322 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3323 tp->serdes_counter &&
3324 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3325 MAC_STATUS_RCVD_CFG)) ==
3326 MAC_STATUS_PCS_SYNCED)) {
3327 tp->serdes_counter--;
3328 current_link_up = 1;
3329 goto out;
3330 }
3331restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332 if (workaround)
3333 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003334 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 udelay(5);
3336 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3337
Michael Chan3d3ebe72006-09-27 15:59:15 -07003338 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3339 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3341 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003342 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 mac_status = tr32(MAC_STATUS);
3344
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003345 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003347 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348
Matt Carlson82cd3d12007-12-20 20:09:00 -08003349 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3350 local_adv |= ADVERTISE_1000XPAUSE;
3351 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3352 local_adv |= ADVERTISE_1000XPSE_ASYM;
3353
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003354 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003355 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003356 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003357 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
3359 tg3_setup_flow_control(tp, local_adv, remote_adv);
3360 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003361 tp->serdes_counter = 0;
3362 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003363 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003364 if (tp->serdes_counter)
3365 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 else {
3367 if (workaround) {
3368 u32 val = serdes_cfg;
3369
3370 if (port_a)
3371 val |= 0xc010000;
3372 else
3373 val |= 0x4010000;
3374
3375 tw32_f(MAC_SERDES_CFG, val);
3376 }
3377
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003378 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 udelay(40);
3380
3381 /* Parallel detection: the link is up only if we have
3382 * PCS_SYNC and are not receiving config code words.
3383 */
3384 mac_status = tr32(MAC_STATUS);
3385 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3386 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3387 tg3_setup_flow_control(tp, 0, 0);
3388 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003389 tp->tg3_flags2 |=
3390 TG3_FLG2_PARALLEL_DETECT;
3391 tp->serdes_counter =
3392 SERDES_PARALLEL_DET_TIMEOUT;
3393 } else
3394 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 }
3396 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003397 } else {
3398 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3399 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 }
3401
3402out:
3403 return current_link_up;
3404}
3405
3406static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3407{
3408 int current_link_up = 0;
3409
Michael Chan5cf64b82007-05-05 12:11:21 -07003410 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412
3413 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003414 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003416
Matt Carlson5be73b42007-12-20 20:09:29 -08003417 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3418 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419
Matt Carlson5be73b42007-12-20 20:09:29 -08003420 if (txflags & ANEG_CFG_PS1)
3421 local_adv |= ADVERTISE_1000XPAUSE;
3422 if (txflags & ANEG_CFG_PS2)
3423 local_adv |= ADVERTISE_1000XPSE_ASYM;
3424
3425 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3426 remote_adv |= LPA_1000XPAUSE;
3427 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3428 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
3430 tg3_setup_flow_control(tp, local_adv, remote_adv);
3431
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432 current_link_up = 1;
3433 }
3434 for (i = 0; i < 30; i++) {
3435 udelay(20);
3436 tw32_f(MAC_STATUS,
3437 (MAC_STATUS_SYNC_CHANGED |
3438 MAC_STATUS_CFG_CHANGED));
3439 udelay(40);
3440 if ((tr32(MAC_STATUS) &
3441 (MAC_STATUS_SYNC_CHANGED |
3442 MAC_STATUS_CFG_CHANGED)) == 0)
3443 break;
3444 }
3445
3446 mac_status = tr32(MAC_STATUS);
3447 if (current_link_up == 0 &&
3448 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3449 !(mac_status & MAC_STATUS_RCVD_CFG))
3450 current_link_up = 1;
3451 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003452 tg3_setup_flow_control(tp, 0, 0);
3453
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454 /* Forcing 1000FD link up. */
3455 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456
3457 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3458 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003459
3460 tw32_f(MAC_MODE, tp->mac_mode);
3461 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 }
3463
3464out:
3465 return current_link_up;
3466}
3467
3468static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3469{
3470 u32 orig_pause_cfg;
3471 u16 orig_active_speed;
3472 u8 orig_active_duplex;
3473 u32 mac_status;
3474 int current_link_up;
3475 int i;
3476
Matt Carlson8d018622007-12-20 20:05:44 -08003477 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478 orig_active_speed = tp->link_config.active_speed;
3479 orig_active_duplex = tp->link_config.active_duplex;
3480
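/* Fast path: when MAC (hardware) autoneg is not in use and the link
 * already looks good (PCS synced, signal detected, no config words
 * received), just ack the status-change bits and return.
 */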
3481 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3482 netif_carrier_ok(tp->dev) &&
3483 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3484 mac_status = tr32(MAC_STATUS);
3485 mac_status &= (MAC_STATUS_PCS_SYNCED |
3486 MAC_STATUS_SIGNAL_DET |
3487 MAC_STATUS_CFG_CHANGED |
3488 MAC_STATUS_RCVD_CFG);
3489 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3490 MAC_STATUS_SIGNAL_DET)) {
3491 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3492 MAC_STATUS_CFG_CHANGED));
3493 return 0;
3494 }
3495 }
3496
3497 tw32_f(MAC_TX_AUTO_NEG, 0);
3498
3499 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3500 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3501 tw32_f(MAC_MODE, tp->mac_mode);
3502 udelay(40);
3503
3504 if (tp->phy_id == PHY_ID_BCM8002)
3505 tg3_init_bcm8002(tp);
3506
3507 /* Enable link change event even when serdes polling. */
3508 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3509 udelay(40);
3510
3511 current_link_up = 0;
3512 mac_status = tr32(MAC_STATUS);
3513
3514 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3515 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3516 else
3517 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3518
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 tp->hw_status->status =
3520 (SD_STATUS_UPDATED |
3521 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3522
3523 for (i = 0; i < 100; i++) {
3524 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3525 MAC_STATUS_CFG_CHANGED));
3526 udelay(5);
3527 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003528 MAC_STATUS_CFG_CHANGED |
3529 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 break;
3531 }
3532
3533 mac_status = tr32(MAC_STATUS);
3534 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3535 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003536 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3537 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 tw32_f(MAC_MODE, (tp->mac_mode |
3539 MAC_MODE_SEND_CONFIGS));
3540 udelay(1);
3541 tw32_f(MAC_MODE, tp->mac_mode);
3542 }
3543 }
3544
3545 if (current_link_up == 1) {
3546 tp->link_config.active_speed = SPEED_1000;
3547 tp->link_config.active_duplex = DUPLEX_FULL;
3548 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3549 LED_CTRL_LNKLED_OVERRIDE |
3550 LED_CTRL_1000MBPS_ON));
3551 } else {
3552 tp->link_config.active_speed = SPEED_INVALID;
3553 tp->link_config.active_duplex = DUPLEX_INVALID;
3554 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3555 LED_CTRL_LNKLED_OVERRIDE |
3556 LED_CTRL_TRAFFIC_OVERRIDE));
3557 }
3558
3559 if (current_link_up != netif_carrier_ok(tp->dev)) {
3560 if (current_link_up)
3561 netif_carrier_on(tp->dev);
3562 else
3563 netif_carrier_off(tp->dev);
3564 tg3_link_report(tp);
3565 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003566 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567 if (orig_pause_cfg != now_pause_cfg ||
3568 orig_active_speed != tp->link_config.active_speed ||
3569 orig_active_duplex != tp->link_config.active_duplex)
3570 tg3_link_report(tp);
3571 }
3572
3573 return 0;
3574}
3575
Michael Chan747e8f82005-07-25 12:33:22 -07003576static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3577{
3578 int current_link_up, err = 0;
3579 u32 bmsr, bmcr;
3580 u16 current_speed;
3581 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003582 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003583
3584 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3585 tw32_f(MAC_MODE, tp->mac_mode);
3586 udelay(40);
3587
3588 tw32(MAC_EVENT, 0);
3589
3590 tw32_f(MAC_STATUS,
3591 (MAC_STATUS_SYNC_CHANGED |
3592 MAC_STATUS_CFG_CHANGED |
3593 MAC_STATUS_MI_COMPLETION |
3594 MAC_STATUS_LNKSTATE_CHANGED));
3595 udelay(40);
3596
3597 if (force_reset)
3598 tg3_phy_reset(tp);
3599
3600 current_link_up = 0;
3601 current_speed = SPEED_INVALID;
3602 current_duplex = DUPLEX_INVALID;
3603
3604 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3605 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3607 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3608 bmsr |= BMSR_LSTATUS;
3609 else
3610 bmsr &= ~BMSR_LSTATUS;
3611 }
Michael Chan747e8f82005-07-25 12:33:22 -07003612
3613 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3614
3615 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003616 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003617 /* do nothing, just check for link up at the end */
3618 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3619 u32 adv, new_adv;
3620
3621 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3622 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3623 ADVERTISE_1000XPAUSE |
3624 ADVERTISE_1000XPSE_ASYM |
3625 ADVERTISE_SLCT);
3626
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003627 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003628
3629 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3630 new_adv |= ADVERTISE_1000XHALF;
3631 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3632 new_adv |= ADVERTISE_1000XFULL;
3633
3634 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3635 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3636 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3637 tg3_writephy(tp, MII_BMCR, bmcr);
3638
3639 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003640 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003641 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3642
3643 return err;
3644 }
3645 } else {
3646 u32 new_bmcr;
3647
3648 bmcr &= ~BMCR_SPEED1000;
3649 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3650
3651 if (tp->link_config.duplex == DUPLEX_FULL)
3652 new_bmcr |= BMCR_FULLDPLX;
3653
3654 if (new_bmcr != bmcr) {
3655 /* BMCR_SPEED1000 is a reserved bit that needs
3656 * to be set on write.
3657 */
3658 new_bmcr |= BMCR_SPEED1000;
3659
3660 /* Force a linkdown */
3661 if (netif_carrier_ok(tp->dev)) {
3662 u32 adv;
3663
3664 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3665 adv &= ~(ADVERTISE_1000XFULL |
3666 ADVERTISE_1000XHALF |
3667 ADVERTISE_SLCT);
3668 tg3_writephy(tp, MII_ADVERTISE, adv);
3669 tg3_writephy(tp, MII_BMCR, bmcr |
3670 BMCR_ANRESTART |
3671 BMCR_ANENABLE);
3672 udelay(10);
3673 netif_carrier_off(tp->dev);
3674 }
3675 tg3_writephy(tp, MII_BMCR, new_bmcr);
3676 bmcr = new_bmcr;
3677 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3678 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003679 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3680 ASIC_REV_5714) {
3681 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3682 bmsr |= BMSR_LSTATUS;
3683 else
3684 bmsr &= ~BMSR_LSTATUS;
3685 }
Michael Chan747e8f82005-07-25 12:33:22 -07003686 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3687 }
3688 }
3689
3690 if (bmsr & BMSR_LSTATUS) {
3691 current_speed = SPEED_1000;
3692 current_link_up = 1;
3693 if (bmcr & BMCR_FULLDPLX)
3694 current_duplex = DUPLEX_FULL;
3695 else
3696 current_duplex = DUPLEX_HALF;
3697
Matt Carlsonef167e22007-12-20 20:10:01 -08003698 local_adv = 0;
3699 remote_adv = 0;
3700
Michael Chan747e8f82005-07-25 12:33:22 -07003701 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003702 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003703
3704 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3705 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3706 common = local_adv & remote_adv;
3707 if (common & (ADVERTISE_1000XHALF |
3708 ADVERTISE_1000XFULL)) {
3709 if (common & ADVERTISE_1000XFULL)
3710 current_duplex = DUPLEX_FULL;
3711 else
3712 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003713 } else {
3714 current_link_up = 0;
3715 }
3716 }
3717 }
3718
Matt Carlsonef167e22007-12-20 20:10:01 -08003719 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3720 tg3_setup_flow_control(tp, local_adv, remote_adv);
3721
Michael Chan747e8f82005-07-25 12:33:22 -07003722 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3723 if (tp->link_config.active_duplex == DUPLEX_HALF)
3724 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3725
3726 tw32_f(MAC_MODE, tp->mac_mode);
3727 udelay(40);
3728
3729 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3730
3731 tp->link_config.active_speed = current_speed;
3732 tp->link_config.active_duplex = current_duplex;
3733
3734 if (current_link_up != netif_carrier_ok(tp->dev)) {
3735 if (current_link_up)
3736 netif_carrier_on(tp->dev);
3737 else {
3738 netif_carrier_off(tp->dev);
3739 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3740 }
3741 tg3_link_report(tp);
3742 }
3743 return err;
3744}
3745
3746static void tg3_serdes_parallel_detect(struct tg3 *tp)
3747{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003748 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003749 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003750 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003751 return;
3752 }
3753 if (!netif_carrier_ok(tp->dev) &&
3754 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3755 u32 bmcr;
3756
3757 tg3_readphy(tp, MII_BMCR, &bmcr);
3758 if (bmcr & BMCR_ANENABLE) {
3759 u32 phy1, phy2;
3760
3761 /* Select shadow register 0x1f */
3762 tg3_writephy(tp, 0x1c, 0x7c00);
3763 tg3_readphy(tp, 0x1c, &phy1);
3764
3765 /* Select expansion interrupt status register */
3766 tg3_writephy(tp, 0x17, 0x0f01);
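/* Read twice; the expansion status register appears to be latched,
 * so the first read can return stale state.
 */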
3767 tg3_readphy(tp, 0x15, &phy2);
3768 tg3_readphy(tp, 0x15, &phy2);
3769
3770 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3771 /* We have signal detect and are not receiving
3772 * config code words, so the link is up by
3773 * parallel detection.
3774 */
3775
3776 bmcr &= ~BMCR_ANENABLE;
3777 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3778 tg3_writephy(tp, MII_BMCR, bmcr);
3779 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3780 }
3781 }
3782 } else if (netif_carrier_ok(tp->dev) &&
3784 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3785 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3786 u32 phy2;
3787
3788 /* Select expansion interrupt status register */
3789 tg3_writephy(tp, 0x17, 0x0f01);
3790 tg3_readphy(tp, 0x15, &phy2);
3791 if (phy2 & 0x20) {
3792 u32 bmcr;
3793
3794 /* Config code words received, turn on autoneg. */
3795 tg3_readphy(tp, MII_BMCR, &bmcr);
3796 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3797
3798 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3799
3800 }
3801 }
3802}
3803
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3805{
3806 int err;
3807
3808 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3809 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003810 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3811 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812 } else {
3813 err = tg3_setup_copper_phy(tp, force_reset);
3814 }
3815
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003816 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003817 u32 val, scale;
3818
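/* On 5784 AX parts, re-tune the GRC timer prescaler to the current
 * MAC clock; the chosen ratios presumably keep the timer tick near
 * 1 MHz.
 */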
3819 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3820 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3821 scale = 65;
3822 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3823 scale = 6;
3824 else
3825 scale = 12;
3826
3827 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3828 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3829 tw32(GRC_MISC_CFG, val);
3830 }
3831
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 if (tp->link_config.active_speed == SPEED_1000 &&
3833 tp->link_config.active_duplex == DUPLEX_HALF)
3834 tw32(MAC_TX_LENGTHS,
3835 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3836 (6 << TX_LENGTHS_IPG_SHIFT) |
3837 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3838 else
3839 tw32(MAC_TX_LENGTHS,
3840 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3841 (6 << TX_LENGTHS_IPG_SHIFT) |
3842 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3843
3844 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3845 if (netif_carrier_ok(tp->dev)) {
3846 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003847 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 } else {
3849 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3850 }
3851 }
3852
Matt Carlson8ed5d972007-05-07 00:25:49 -07003853 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3854 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3855 if (!netif_carrier_ok(tp->dev))
3856 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3857 tp->pwrmgmt_thresh;
3858 else
3859 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3860 tw32(PCIE_PWR_MGMT_THRESH, val);
3861 }
3862
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 return err;
3864}
3865
Michael Chandf3e6542006-05-26 17:48:07 -07003866/* This is called whenever we suspect that the system chipset is re-
3867 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3868 * is bogus tx completions. We try to recover by setting the
3869 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3870 * in the workqueue.
3871 */
3872static void tg3_tx_recover(struct tg3 *tp)
3873{
3874 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3875 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3876
3877 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3878 "mapped I/O cycles to the network device, attempting to "
3879 "recover. Please report the problem to the driver maintainer "
3880 "and include system chipset information.\n", tp->dev->name);
3881
3882 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003883 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003884 spin_unlock(&tp->lock);
3885}
3886
Michael Chan1b2a7202006-08-07 21:46:02 -07003887static inline u32 tg3_tx_avail(struct tg3 *tp)
3888{
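/* The barrier orders the caller's queue-state update against the
 * read of tx_prod/tx_cons below; see also the memory-barrier
 * comment in tg3_tx().
 */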
3889 smp_mb();
3890 return (tp->tx_pending -
3891 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3892}
3893
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894/* Tigon3 never reports partial packet sends. So we do not
3895 * need special logic to handle SKBs that have not had all
3896 * of their frags sent yet, like SunGEM does.
3897 */
3898static void tg3_tx(struct tg3 *tp)
3899{
3900 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3901 u32 sw_idx = tp->tx_cons;
3902
3903 while (sw_idx != hw_idx) {
3904 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3905 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003906 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907
Michael Chandf3e6542006-05-26 17:48:07 -07003908 if (unlikely(skb == NULL)) {
3909 tg3_tx_recover(tp);
3910 return;
3911 }
3912
David S. Miller90079ce2008-09-11 04:52:51 -07003913 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914
3915 ri->skb = NULL;
3916
3917 sw_idx = NEXT_TX(sw_idx);
3918
3919 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003921 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3922 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 sw_idx = NEXT_TX(sw_idx);
3924 }
3925
David S. Millerf47c11e2005-06-24 20:18:35 -07003926 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003927
3928 if (unlikely(tx_bug)) {
3929 tg3_tx_recover(tp);
3930 return;
3931 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 }
3933
3934 tp->tx_cons = sw_idx;
3935
Michael Chan1b2a7202006-08-07 21:46:02 -07003936 /* Need to make the tx_cons update visible to tg3_start_xmit()
3937 * before checking for netif_queue_stopped(). Without the
3938 * memory barrier, there is a small possibility that tg3_start_xmit()
3939 * will miss it and cause the queue to be stopped forever.
3940 */
3941 smp_mb();
3942
3943 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003944 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07003945 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003946 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003947 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07003948 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07003949 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003950 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951}
3952
3953/* Returns size of skb allocated or < 0 on error.
3954 *
3955 * We only need to fill in the address because the other members
3956 * of the RX descriptor are invariant; see tg3_init_rings.
3957 *
3958 * Note the purposeful asymmetry of cpu vs. chip accesses. For
3959 * posting buffers we only dirty the first cache line of the RX
3960 * descriptor (containing the address). Whereas for the RX status
3961 * buffers the cpu only reads the last cacheline of the RX descriptor
3962 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3963 */
3964static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3965 int src_idx, u32 dest_idx_unmasked)
3966{
3967 struct tg3_rx_buffer_desc *desc;
3968 struct ring_info *map, *src_map;
3969 struct sk_buff *skb;
3970 dma_addr_t mapping;
3971 int skb_size, dest_idx;
3972
3973 src_map = NULL;
3974 switch (opaque_key) {
3975 case RXD_OPAQUE_RING_STD:
3976 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3977 desc = &tp->rx_std[dest_idx];
3978 map = &tp->rx_std_buffers[dest_idx];
3979 if (src_idx >= 0)
3980 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07003981 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982 break;
3983
3984 case RXD_OPAQUE_RING_JUMBO:
3985 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3986 desc = &tp->rx_jumbo[dest_idx];
3987 map = &tp->rx_jumbo_buffers[dest_idx];
3988 if (src_idx >= 0)
3989 src_map = &tp->rx_jumbo_buffers[src_idx];
3990 skb_size = RX_JUMBO_PKT_BUF_SZ;
3991 break;
3992
3993 default:
3994 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003996
3997 /* Do not overwrite any of the map or rp information
3998 * until we are sure we can commit to a new buffer.
3999 *
4000 * Callers depend upon this behavior and assume that
4001 * we leave everything unchanged if we fail.
4002 */
David S. Millera20e9c62006-07-31 22:38:16 -07004003 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 if (skb == NULL)
4005 return -ENOMEM;
4006
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 skb_reserve(skb, tp->rx_offset);
4008
4009 mapping = pci_map_single(tp->pdev, skb->data,
4010 skb_size - tp->rx_offset,
4011 PCI_DMA_FROMDEVICE);
4012
4013 map->skb = skb;
4014 pci_unmap_addr_set(map, mapping, mapping);
4015
4016 if (src_map != NULL)
4017 src_map->skb = NULL;
4018
4019 desc->addr_hi = ((u64)mapping >> 32);
4020 desc->addr_lo = ((u64)mapping & 0xffffffff);
4021
4022 return skb_size;
4023}
4024
4025/* We only need to move over the address because the other
4026 * members of the RX descriptor are invariant. See notes above
4027 * tg3_alloc_rx_skb for full details.
4028 */
4029static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4030 int src_idx, u32 dest_idx_unmasked)
4031{
4032 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4033 struct ring_info *src_map, *dest_map;
4034 int dest_idx;
4035
4036 switch (opaque_key) {
4037 case RXD_OPAQUE_RING_STD:
4038 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4039 dest_desc = &tp->rx_std[dest_idx];
4040 dest_map = &tp->rx_std_buffers[dest_idx];
4041 src_desc = &tp->rx_std[src_idx];
4042 src_map = &tp->rx_std_buffers[src_idx];
4043 break;
4044
4045 case RXD_OPAQUE_RING_JUMBO:
4046 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4047 dest_desc = &tp->rx_jumbo[dest_idx];
4048 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4049 src_desc = &tp->rx_jumbo[src_idx];
4050 src_map = &tp->rx_jumbo_buffers[src_idx];
4051 break;
4052
4053 default:
4054 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004055 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056
4057 dest_map->skb = src_map->skb;
4058 pci_unmap_addr_set(dest_map, mapping,
4059 pci_unmap_addr(src_map, mapping));
4060 dest_desc->addr_hi = src_desc->addr_hi;
4061 dest_desc->addr_lo = src_desc->addr_lo;
4062
4063 src_map->skb = NULL;
4064}
4065
4066#if TG3_VLAN_TAG_USED
4067static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4068{
4069 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4070}
4071#endif
4072
4073/* The RX ring scheme is composed of multiple rings which post fresh
4074 * buffers to the chip, and one special ring the chip uses to report
4075 * status back to the host.
4076 *
4077 * The special ring reports the status of received packets to the
4078 * host. The chip does not write into the original descriptor the
4079 * RX buffer was obtained from. The chip simply takes the original
4080 * descriptor as provided by the host, updates the status and length
4081 * field, then writes this into the next status ring entry.
4082 *
4083 * Each ring the host uses to post buffers to the chip is described
4084 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4085 * it is first placed into the on-chip ram. When the packet's length
4086 * is known, it walks down the TG3_BDINFO entries to select the ring.
4087 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4088 * whose MAXLEN covers the new packet's length is chosen.
4089 *
4090 * The "separate ring for rx status" scheme may sound queer, but it makes
4091 * sense from a cache coherency perspective. If only the host writes
4092 * to the buffer post rings, and only the chip writes to the rx status
4093 * rings, then cache lines never move beyond shared-modified state.
4094 * If both the host and chip were to write into the same ring, cache line
4095 * eviction could occur since both entities want it in an exclusive state.
4096 */
4097static int tg3_rx(struct tg3 *tp, int budget)
4098{
Michael Chanf92905d2006-06-29 20:14:29 -07004099 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004100 u32 sw_idx = tp->rx_rcb_ptr;
4101 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 int received;
4103
4104 hw_idx = tp->hw_status->idx[0].rx_producer;
4105 /*
4106 * We need to order the read of hw_idx and the read of
4107 * the opaque cookie.
4108 */
4109 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110 work_mask = 0;
4111 received = 0;
4112 while (sw_idx != hw_idx && budget > 0) {
4113 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4114 unsigned int len;
4115 struct sk_buff *skb;
4116 dma_addr_t dma_addr;
4117 u32 opaque_key, desc_idx, *post_ptr;
4118
4119 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4120 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4121 if (opaque_key == RXD_OPAQUE_RING_STD) {
4122 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4123 mapping);
4124 skb = tp->rx_std_buffers[desc_idx].skb;
4125 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004126 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4128 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4129 mapping);
4130 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4131 post_ptr = &tp->rx_jumbo_ptr;
4132 } else {
4134 goto next_pkt_nopost;
4135 }
4136
4137 work_mask |= opaque_key;
4138
4139 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4140 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4141 drop_it:
4142 tg3_recycle_rx(tp, opaque_key,
4143 desc_idx, *post_ptr);
4144 drop_it_no_recycle:
4145 /* Other statistics kept track of by card. */
4146 tp->net_stats.rx_dropped++;
4147 goto next_pkt;
4148 }
4149
4150 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4151
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004152 if (len > RX_COPY_THRESHOLD && tp->rx_offset == 2) {
4153 /* rx_offset != 2 iff this is a 5701 card running
4154 * in PCI-X mode [see tg3_get_invariants()]. */
4157 int skb_size;
4158
4159 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4160 desc_idx, *post_ptr);
4161 if (skb_size < 0)
4162 goto drop_it;
4163
4164 pci_unmap_single(tp->pdev, dma_addr,
4165 skb_size - tp->rx_offset,
4166 PCI_DMA_FROMDEVICE);
4167
4168 skb_put(skb, len);
4169 } else {
4170 struct sk_buff *copy_skb;
4171
4172 tg3_recycle_rx(tp, opaque_key,
4173 desc_idx, *post_ptr);
4174
David S. Millera20e9c62006-07-31 22:38:16 -07004175 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176 if (copy_skb == NULL)
4177 goto drop_it_no_recycle;
4178
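/* Reserve 2 bytes so the IP header lands on a 4-byte boundary
 * after the 14-byte Ethernet header.
 */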
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 skb_reserve(copy_skb, 2);
4180 skb_put(copy_skb, len);
4181 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004182 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4184
4185 /* We'll reuse the original ring buffer. */
4186 skb = copy_skb;
4187 }
4188
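/* The hardware flags a good TCP/UDP checksum by reporting 0xffff
 * in the descriptor checksum field.
 */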
4189 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4190 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4191 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4192 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4193 skb->ip_summed = CHECKSUM_UNNECESSARY;
4194 else
4195 skb->ip_summed = CHECKSUM_NONE;
4196
4197 skb->protocol = eth_type_trans(skb, tp->dev);
4198#if TG3_VLAN_TAG_USED
4199 if (tp->vlgrp != NULL &&
4200 desc->type_flags & RXD_FLAG_VLAN) {
4201 tg3_vlan_rx(tp, skb,
4202 desc->err_vlan & RXD_VLAN_MASK);
4203 } else
4204#endif
4205 netif_receive_skb(skb);
4206
4207 tp->dev->last_rx = jiffies;
4208 received++;
4209 budget--;
4210
4211next_pkt:
4212 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004213
4214 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4215 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4216
4217 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4218 TG3_64BIT_REG_LOW, idx);
4219 work_mask &= ~RXD_OPAQUE_RING_STD;
4220 rx_std_posted = 0;
4221 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004223 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004224 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004225
4226 /* Refresh hw_idx to see if there is new work */
4227 if (sw_idx == hw_idx) {
4228 hw_idx = tp->hw_status->idx[0].rx_producer;
4229 rmb();
4230 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231 }
4232
4233 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004234 tp->rx_rcb_ptr = sw_idx;
4235 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236
4237 /* Refill RX ring(s). */
4238 if (work_mask & RXD_OPAQUE_RING_STD) {
4239 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4240 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4241 sw_idx);
4242 }
4243 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4244 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4245 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4246 sw_idx);
4247 }
4248 mmiowb();
4249
4250 return received;
4251}
4252
David S. Miller6f535762007-10-11 18:08:29 -07004253static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257 /* handle link change and other phy events */
4258 if (!(tp->tg3_flags &
4259 (TG3_FLAG_USE_LINKCHG_REG |
4260 TG3_FLAG_POLL_SERDES))) {
4261 if (sblk->status & SD_STATUS_LINK_CHG) {
4262 sblk->status = SD_STATUS_UPDATED |
4263 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004264 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004265 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4266 tw32_f(MAC_STATUS,
4267 (MAC_STATUS_SYNC_CHANGED |
4268 MAC_STATUS_CFG_CHANGED |
4269 MAC_STATUS_MI_COMPLETION |
4270 MAC_STATUS_LNKSTATE_CHANGED));
4271 udelay(40);
4272 } else
4273 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004274 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 }
4276 }
4277
4278 /* run TX completion thread */
4279 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004281 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004282 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 }
4284
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 /* run RX thread, within the bounds set by NAPI.
4286 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004287 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004289 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004290 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291
David S. Miller6f535762007-10-11 18:08:29 -07004292 return work_done;
4293}
David S. Millerf7383c22005-05-18 22:50:53 -07004294
David S. Miller6f535762007-10-11 18:08:29 -07004295static int tg3_poll(struct napi_struct *napi, int budget)
4296{
4297 struct tg3 *tp = container_of(napi, struct tg3, napi);
4298 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004299 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004300
4301 while (1) {
4302 work_done = tg3_poll_work(tp, work_done, budget);
4303
4304 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4305 goto tx_recovery;
4306
4307 if (unlikely(work_done >= budget))
4308 break;
4309
Michael Chan4fd7ab52007-10-12 01:39:50 -07004310 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4311 /* tp->last_tag is used in tg3_restart_ints() below
4312 * to tell the hw how much work has been processed,
4313 * so we must read it before checking for more work.
4314 */
4315 tp->last_tag = sblk->status_tag;
4316 rmb();
4317 } else
4318 sblk->status &= ~SD_STATUS_UPDATED;
4319
David S. Miller6f535762007-10-11 18:08:29 -07004320 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004321 netif_rx_complete(tp->dev, napi);
4322 tg3_restart_ints(tp);
4323 break;
4324 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 }
4326
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004327 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004328
4329tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004330 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004331 netif_rx_complete(tp->dev, napi);
4332 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004333 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334}
4335
David S. Millerf47c11e2005-06-24 20:18:35 -07004336static void tg3_irq_quiesce(struct tg3 *tp)
4337{
4338 BUG_ON(tp->irq_sync);
4339
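/* Set irq_sync first so interrupt handlers back off early, then
 * wait for any handler that is already running to finish.
 */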
4340 tp->irq_sync = 1;
4341 smp_mb();
4342
4343 synchronize_irq(tp->pdev->irq);
4344}
4345
4346static inline int tg3_irq_sync(struct tg3 *tp)
4347{
4348 return tp->irq_sync;
4349}
4350
4351/* Fully shut down all tg3 driver activity elsewhere in the system.
4352 * If irq_sync is non-zero, then we must synchronize with the IRQ
4353 * handler as well. Most of the time this is not necessary, except when
4354 * shutting down the device.
4355 */
4356static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4357{
Michael Chan46966542007-07-11 19:47:19 -07004358 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004359 if (irq_sync)
4360 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004361}
4362
4363static inline void tg3_full_unlock(struct tg3 *tp)
4364{
David S. Millerf47c11e2005-06-24 20:18:35 -07004365 spin_unlock_bh(&tp->lock);
4366}
4367
Michael Chanfcfa0a32006-03-20 22:28:41 -08004368/* One-shot MSI handler - the chip automatically disables the interrupt
4369 * after sending the MSI, so the driver doesn't have to do it.
4370 */
David Howells7d12e782006-10-05 14:55:46 +01004371static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004372{
4373 struct net_device *dev = dev_id;
4374 struct tg3 *tp = netdev_priv(dev);
4375
4376 prefetch(tp->hw_status);
4377 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4378
4379 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004380 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004381
4382 return IRQ_HANDLED;
4383}
4384
Michael Chan88b06bc2005-04-21 17:13:25 -07004385/* MSI ISR - No need to check for interrupt sharing and no need to
4386 * flush status block and interrupt mailbox. PCI ordering rules
4387 * guarantee that MSI will arrive after the status block.
4388 */
David Howells7d12e782006-10-05 14:55:46 +01004389static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004390{
4391 struct net_device *dev = dev_id;
4392 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004393
Michael Chan61487482005-09-05 17:53:19 -07004394 prefetch(tp->hw_status);
4395 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004396 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004397 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004398 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004399 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004400 * NIC to stop sending us irqs, engaging "in-intr-handler"
4401 * event coalescing.
4402 */
4403 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004404 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004405 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004406
Michael Chan88b06bc2005-04-21 17:13:25 -07004407 return IRQ_RETVAL(1);
4408}
4409
David Howells7d12e782006-10-05 14:55:46 +01004410static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411{
4412 struct net_device *dev = dev_id;
4413 struct tg3 *tp = netdev_priv(dev);
4414 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415 unsigned int handled = 1;
4416
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417 /* In INTx mode, the interrupt can arrive at the CPU before the
4418 * status block that was posted prior to the interrupt does.
4419 * Reading the PCI State register will confirm whether the
4420 * interrupt is ours and will flush the status block.
4421 */
Michael Chand18edcb2007-03-24 20:57:11 -07004422 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4423 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4424 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4425 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004426 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004427 }
Michael Chand18edcb2007-03-24 20:57:11 -07004428 }
4429
4430 /*
4431 * Writing any value to intr-mbox-0 clears PCI INTA# and
4432 * chip-internal interrupt pending events.
4433 * Writing non-zero to intr-mbox-0 additionally tells the
4434 * NIC to stop sending us irqs, engaging "in-intr-handler"
4435 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004436 *
4437 * Flush the mailbox to de-assert the IRQ immediately to prevent
4438 * spurious interrupts. The flush impacts performance but
4439 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004440 */
Michael Chanc04cb342007-05-07 00:26:15 -07004441 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004442 if (tg3_irq_sync(tp))
4443 goto out;
4444 sblk->status &= ~SD_STATUS_UPDATED;
4445 if (likely(tg3_has_work(tp))) {
4446 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004447 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004448 } else {
4449 /* No work, shared interrupt perhaps? re-enable
4450 * interrupts, and flush that PCI write
4451 */
4452 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4453 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004454 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004455out:
David S. Millerfac9b832005-05-18 22:46:34 -07004456 return IRQ_RETVAL(handled);
4457}
4458
David Howells7d12e782006-10-05 14:55:46 +01004459static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004460{
4461 struct net_device *dev = dev_id;
4462 struct tg3 *tp = netdev_priv(dev);
4463 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004464 unsigned int handled = 1;
4465
David S. Millerfac9b832005-05-18 22:46:34 -07004466 /* In INTx mode, the interrupt can arrive at the CPU before the
4467 * status block that was posted prior to the interrupt does.
4468 * Reading the PCI State register will confirm whether the
4469 * interrupt is ours and will flush the status block.
4470 */
Michael Chand18edcb2007-03-24 20:57:11 -07004471 if (unlikely(sblk->status_tag == tp->last_tag)) {
4472 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4473 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4474 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004475 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 }
Michael Chand18edcb2007-03-24 20:57:11 -07004477 }
4478
4479 /*
4480 * Writing any value to intr-mbox-0 clears PCI INTA# and
4481 * chip-internal interrupt pending events.
4482 * Writing non-zero to intr-mbox-0 additionally tells the
4483 * NIC to stop sending us irqs, engaging "in-intr-handler"
4484 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004485 *
4486 * Flush the mailbox to de-assert the IRQ immediately to prevent
4487 * spurious interrupts. The flush impacts performance but
4488 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004489 */
Michael Chanc04cb342007-05-07 00:26:15 -07004490 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004491 if (tg3_irq_sync(tp))
4492 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004493 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004494 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4495 /* Update last_tag to mark that this status has been
4496 * seen. Because interrupt may be shared, we may be
4497 * racing with tg3_poll(), so only update last_tag
4498 * if tg3_poll() is not scheduled.
4499 */
4500 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004501 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004503out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 return IRQ_RETVAL(handled);
4505}
4506
Michael Chan79381092005-04-21 17:13:59 -07004507/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004508static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004509{
4510 struct net_device *dev = dev_id;
4511 struct tg3 *tp = netdev_priv(dev);
4512 struct tg3_hw_status *sblk = tp->hw_status;
4513
Michael Chanf9804dd2005-09-27 12:13:10 -07004514 if ((sblk->status & SD_STATUS_UPDATED) ||
4515 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004516 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004517 return IRQ_RETVAL(1);
4518 }
4519 return IRQ_RETVAL(0);
4520}
4521
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004522static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004523static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524
Michael Chanb9ec6c12006-07-25 16:37:27 -07004525/* Restart hardware after configuration changes, self-test, etc.
4526 * Invoked with tp->lock held.
4527 */
4528static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004529 __releases(tp->lock)
4530 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004531{
4532 int err;
4533
4534 err = tg3_init_hw(tp, reset_phy);
4535 if (err) {
4536 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4537 "aborting.\n", tp->dev->name);
4538 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4539 tg3_full_unlock(tp);
4540 del_timer_sync(&tp->timer);
4541 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004542 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004543 dev_close(tp->dev);
4544 tg3_full_lock(tp, 0);
4545 }
4546 return err;
4547}
4548
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549#ifdef CONFIG_NET_POLL_CONTROLLER
4550static void tg3_poll_controller(struct net_device *dev)
4551{
Michael Chan88b06bc2005-04-21 17:13:25 -07004552 struct tg3 *tp = netdev_priv(dev);
4553
David Howells7d12e782006-10-05 14:55:46 +01004554 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555}
4556#endif
4557
David Howellsc4028952006-11-22 14:57:56 +00004558static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559{
David Howellsc4028952006-11-22 14:57:56 +00004560 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004561 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004562 unsigned int restart_timer;
4563
Michael Chan7faa0062006-02-02 17:29:28 -08004564 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004565
4566 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004567 tg3_full_unlock(tp);
4568 return;
4569 }
4570
4571 tg3_full_unlock(tp);
4572
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004573 tg3_phy_stop(tp);
4574
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575 tg3_netif_stop(tp);
4576
David S. Millerf47c11e2005-06-24 20:18:35 -07004577 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578
4579 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4580 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4581
Michael Chandf3e6542006-05-26 17:48:07 -07004582 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4583 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4584 tp->write32_rx_mbox = tg3_write_flush_reg32;
4585 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4586 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4587 }
4588
Michael Chan944d9802005-05-29 14:57:48 -07004589 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004590 err = tg3_init_hw(tp, 1);
4591 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004592 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593
4594 tg3_netif_start(tp);
4595
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 if (restart_timer)
4597 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004598
Michael Chanb9ec6c12006-07-25 16:37:27 -07004599out:
Michael Chan7faa0062006-02-02 17:29:28 -08004600 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004601
4602 if (!err)
4603 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604}
4605
Michael Chanb0408752007-02-13 12:18:30 -08004606static void tg3_dump_short_state(struct tg3 *tp)
4607{
4608 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4609 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4610 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4611 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4612}
4613
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614static void tg3_tx_timeout(struct net_device *dev)
4615{
4616 struct tg3 *tp = netdev_priv(dev);
4617
Michael Chanb0408752007-02-13 12:18:30 -08004618 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004619 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4620 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004621 tg3_dump_short_state(tp);
4622 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623
4624 schedule_work(&tp->reset_task);
4625}
4626
Michael Chanc58ec932005-09-17 00:46:27 -07004627/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4628static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4629{
4630 u32 base = (u32) mapping & 0xffffffff;
4631
4632 return ((base > 0xffffdcc0) &&
4633 (base + len + 8 < base));
4634}
4635
Michael Chan72f2afb2006-03-06 19:28:35 -08004636/* Test for DMA addresses > 40-bit */
4637static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4638 int len)
4639{
4640#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004641 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004642 return (((u64) mapping + len) > DMA_40BIT_MASK);
4643 return 0;
4644#else
4645 return 0;
4646#endif
4647}
4648
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4650
Michael Chan72f2afb2006-03-06 19:28:35 -08004651/* Workaround 4GB and 40-bit hardware DMA bugs. */
4652static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004653 u32 last_plus_one, u32 *start,
4654 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655{
Matt Carlson41588ba2008-04-19 18:12:33 -07004656 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004657 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004659 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660
Matt Carlson41588ba2008-04-19 18:12:33 -07004661 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4662 new_skb = skb_copy(skb, GFP_ATOMIC);
4663 else {
4664 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4665
4666 new_skb = skb_copy_expand(skb,
4667 skb_headroom(skb) + more_headroom,
4668 skb_tailroom(skb), GFP_ATOMIC);
4669 }
4670
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004672 ret = -1;
4673 } else {
4674 /* New SKB is guaranteed to be linear. */
4675 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004676 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4677 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4678
Michael Chanc58ec932005-09-17 00:46:27 -07004679 /* Make sure new skb does not cross any 4G boundaries.
4680 * Drop the packet if it does.
4681 */
David S. Miller90079ce2008-09-11 04:52:51 -07004682 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004683 if (!ret)
4684 skb_dma_unmap(&tp->pdev->dev, new_skb,
4685 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004686 ret = -1;
4687 dev_kfree_skb(new_skb);
4688 new_skb = NULL;
4689 } else {
4690 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4691 base_flags, 1 | (mss << 1));
4692 *start = NEXT_TX(entry);
4693 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 }
4695
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 /* Now clean up the sw ring entries. */
4697 i = 0;
4698 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 if (i == 0) {
4700 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701 } else {
4702 tp->tx_buffers[entry].skb = NULL;
4703 }
4704 entry = NEXT_TX(entry);
4705 i++;
4706 }
4707
David S. Miller90079ce2008-09-11 04:52:51 -07004708 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709 dev_kfree_skb(skb);
4710
Michael Chanc58ec932005-09-17 00:46:27 -07004711 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712}
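/*
 * Recap of the workaround above: the offending skb is copied into a
 * single linear skb (on 5701 parts skb_copy_expand() adds 1-4 bytes
 * of headroom, presumably so the copy's data starts on a 4-byte
 * boundary and avoids the erratum behind TG3_FLG3_5701_DMA_BUG).
 * The copy is DMA-mapped and re-checked; if the mapping fails or the
 * linear buffer still crosses a 4GB boundary, the packet is dropped.
 * In either case the sw ring slots claimed by the original fragments
 * are rewritten (the first slot takes the copy, or NULL on a drop,
 * the rest NULL) and the original skb is unmapped and freed; on
 * success the caller resumes its producer index from *start, on
 * failure it silently drops the frame.
 */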
4713
4714static void tg3_set_txd(struct tg3 *tp, int entry,
4715 dma_addr_t mapping, int len, u32 flags,
4716 u32 mss_and_is_end)
4717{
4718 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4719 int is_end = (mss_and_is_end & 0x1);
4720 u32 mss = (mss_and_is_end >> 1);
4721 u32 vlan_tag = 0;
4722
4723 if (is_end)
4724 flags |= TXD_FLAG_END;
4725 if (flags & TXD_FLAG_VLAN) {
4726 vlan_tag = flags >> 16;
4727 flags &= 0xffff;
4728 }
4729 vlan_tag |= (mss << TXD_MSS_SHIFT);
4730
4731 txd->addr_hi = ((u64) mapping >> 32);
4732 txd->addr_lo = ((u64) mapping & 0xffffffff);
4733 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4734 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4735}
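/*
 * Descriptor packing: every caller of tg3_set_txd() passes
 * mss_and_is_end as (is_last_piece) | (mss << 1), so bit 0 becomes
 * TXD_FLAG_END and the remaining bits carry the MSS; e.g. the final
 * fragment of a TSO frame with mss = 1460 passes 1 | (1460 << 1) =
 * 0xb69.  When TXD_FLAG_VLAN is set, the VLAN tag rides in the upper
 * 16 bits of flags and is split back out here; the tag and MSS then
 * share the descriptor's last word via TXD_VLAN_TAG_SHIFT and
 * TXD_MSS_SHIFT, while the 64-bit DMA address is split across
 * addr_hi/addr_lo.
 */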
4736
Michael Chan5a6f3072006-03-20 22:28:05 -08004737/* hard_start_xmit for devices that don't have any bugs and
4738 * support TG3_FLG2_HW_TSO_2 only.
4739 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4741{
4742 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004744 struct skb_shared_info *sp;
4745 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004746
4747 len = skb_headlen(skb);
4748
Michael Chan00b70502006-06-17 21:58:45 -07004749 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004750 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004751 * interrupt. Furthermore, IRQ processing runs lockless so we have
4752 * no IRQ context deadlocks to worry about either. Rejoice!
4753 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004754 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004755 if (!netif_queue_stopped(dev)) {
4756 netif_stop_queue(dev);
4757
4758 /* This is a hard error, log it. */
4759 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4760 "queue awake!\n", dev->name);
4761 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004762 return NETDEV_TX_BUSY;
4763 }
4764
4765 entry = tp->tx_prod;
4766 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004767 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004768 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004769 int tcp_opt_len, ip_tcp_len;
4770
4771 if (skb_header_cloned(skb) &&
4772 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4773 dev_kfree_skb(skb);
4774 goto out_unlock;
4775 }
4776
Michael Chanb0026622006-07-03 19:42:14 -07004777 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4778 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4779 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004780 struct iphdr *iph = ip_hdr(skb);
4781
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004782 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004783 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004784
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004785 iph->check = 0;
4786 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004787 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4788 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004789
4790 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4791 TXD_FLAG_CPU_POST_DMA);
4792
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004793 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004794
Michael Chan5a6f3072006-03-20 22:28:05 -08004795 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004796 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004797 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004798#if TG3_VLAN_TAG_USED
4799 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4800 base_flags |= (TXD_FLAG_VLAN |
4801 (vlan_tx_tag_get(skb) << 16));
4802#endif
4803
David S. Miller90079ce2008-09-11 04:52:51 -07004804 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4805 dev_kfree_skb(skb);
4806 goto out_unlock;
4807 }
4808
4809 sp = skb_shinfo(skb);
4810
4811 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004812
4813 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004814
4815 tg3_set_txd(tp, entry, mapping, len, base_flags,
4816 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4817
4818 entry = NEXT_TX(entry);
4819
4820 /* Now loop through additional data fragments, and queue them. */
4821 if (skb_shinfo(skb)->nr_frags > 0) {
4822 unsigned int i, last;
4823
4824 last = skb_shinfo(skb)->nr_frags - 1;
4825 for (i = 0; i <= last; i++) {
4826 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4827
4828 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004829 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004830 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004831
4832 tg3_set_txd(tp, entry, mapping, len,
4833 base_flags, (i == last) | (mss << 1));
4834
4835 entry = NEXT_TX(entry);
4836 }
4837 }
4838
4839 /* Packets are ready, update Tx producer idx local and on card. */
4840 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4841
4842 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004843 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004844 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004845 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004846 netif_wake_queue(tp->dev);
4847 }
4848
4849out_unlock:
4850 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004851
4852 dev->trans_start = jiffies;
4853
4854 return NETDEV_TX_OK;
4855}
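/*
 * Flow control in the fast-path xmit above: the early check only
 * fires in the "should never happen" case where the ring does not
 * have more free descriptors than the frame needs (tg3_tx_avail() <=
 * nr_frags + 1); it logs the "Tx Ring full" BUG and returns
 * NETDEV_TX_BUSY.  The normal throttle sits at the bottom: once no
 * more than MAX_SKB_FRAGS + 1 descriptors remain the queue is
 * stopped, and it is immediately re-woken only if the completion
 * path has meanwhile pushed availability back above
 * TG3_TX_WAKEUP_THRESH(tp).  The mmiowb() at out_unlock keeps the
 * posted mailbox write ordered ahead of the eventual tx-lock release
 * on platforms that need it.
 */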
4856
Michael Chan52c0fd82006-06-29 20:15:54 -07004857static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4858
4859/* Use GSO to workaround a rare TSO bug that may be triggered when the
4860 * TSO header is greater than 80 bytes.
4861 */
4862static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4863{
4864 struct sk_buff *segs, *nskb;
4865
4866 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004867 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004868 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004869 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4870 return NETDEV_TX_BUSY;
4871
4872 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004873 }
4874
4875 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004876 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004877 goto tg3_tso_bug_end;
4878
4879 do {
4880 nskb = segs;
4881 segs = segs->next;
4882 nskb->next = NULL;
4883 tg3_start_xmit_dma_bug(nskb, tp->dev);
4884 } while (segs);
4885
4886tg3_tso_bug_end:
4887 dev_kfree_skb(skb);
4888
4889 return NETDEV_TX_OK;
4890}
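/*
 * tg3_tso_bug() is the fallback used by tg3_start_xmit_dma_bug()
 * when a TSO frame's Ethernet + IP + TCP header exceeds 80 bytes on
 * parts flagged with TG3_FLG2_TSO_BUG: the frame is segmented in
 * software by skb_gso_segment() with TSO masked out of the feature
 * set, and each resulting segment is re-queued through
 * tg3_start_xmit_dma_bug().  Ring space is reserved pessimistically
 * at three descriptors per segment; if even a re-check after
 * stopping the queue cannot find that much room, NETDEV_TX_BUSY is
 * returned, otherwise the queue is woken again and the segments are
 * sent.  The original oversized skb is freed at the end.
 */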
Michael Chan52c0fd82006-06-29 20:15:54 -07004891
Michael Chan5a6f3072006-03-20 22:28:05 -08004892/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4893 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4894 */
4895static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4896{
4897 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004898 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004899 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004900 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004901 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902
4903 len = skb_headlen(skb);
4904
Michael Chan00b70502006-06-17 21:58:45 -07004905 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004906 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004907 * interrupt. Furthermore, IRQ processing runs lockless so we have
4908 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004909 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004910 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004911 if (!netif_queue_stopped(dev)) {
4912 netif_stop_queue(dev);
4913
4914 /* This is a hard error, log it. */
4915 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4916 "queue awake!\n", dev->name);
4917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918 return NETDEV_TX_BUSY;
4919 }
4920
4921 entry = tp->tx_prod;
4922 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004923 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004926 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004927 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07004928 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929
4930 if (skb_header_cloned(skb) &&
4931 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4932 dev_kfree_skb(skb);
4933 goto out_unlock;
4934 }
4935
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004936 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004937 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938
Michael Chan52c0fd82006-06-29 20:15:54 -07004939 hdr_len = ip_tcp_len + tcp_opt_len;
4940 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08004941 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07004942 return (tg3_tso_bug(tp, skb));
4943
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4945 TXD_FLAG_CPU_POST_DMA);
4946
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004947 iph = ip_hdr(skb);
4948 iph->check = 0;
4949 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004950 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004951 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004953 } else
4954 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4955 iph->daddr, 0,
4956 IPPROTO_TCP,
4957 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958
4959 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4960 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004961 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962 int tsflags;
4963
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004964 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004965 mss |= (tsflags << 11);
4966 }
4967 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004968 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969 int tsflags;
4970
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004971 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 base_flags |= tsflags << 12;
4973 }
4974 }
4975 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976#if TG3_VLAN_TAG_USED
4977 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4978 base_flags |= (TXD_FLAG_VLAN |
4979 (vlan_tx_tag_get(skb) << 16));
4980#endif
4981
David S. Miller90079ce2008-09-11 04:52:51 -07004982 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4983 dev_kfree_skb(skb);
4984 goto out_unlock;
4985 }
4986
4987 sp = skb_shinfo(skb);
4988
4989 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990
4991 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992
4993 would_hit_hwbug = 0;
4994
Matt Carlson41588ba2008-04-19 18:12:33 -07004995 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4996 would_hit_hwbug = 1;
4997 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07004998 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999
5000 tg3_set_txd(tp, entry, mapping, len, base_flags,
5001 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5002
5003 entry = NEXT_TX(entry);
5004
5005 /* Now loop through additional data fragments, and queue them. */
5006 if (skb_shinfo(skb)->nr_frags > 0) {
5007 unsigned int i, last;
5008
5009 last = skb_shinfo(skb)->nr_frags - 1;
5010 for (i = 0; i <= last; i++) {
5011 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5012
5013 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005014 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015
5016 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017
Michael Chanc58ec932005-09-17 00:46:27 -07005018 if (tg3_4g_overflow_test(mapping, len))
5019 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020
Michael Chan72f2afb2006-03-06 19:28:35 -08005021 if (tg3_40bit_overflow_test(tp, mapping, len))
5022 would_hit_hwbug = 1;
5023
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5025 tg3_set_txd(tp, entry, mapping, len,
5026 base_flags, (i == last)|(mss << 1));
5027 else
5028 tg3_set_txd(tp, entry, mapping, len,
5029 base_flags, (i == last));
5030
5031 entry = NEXT_TX(entry);
5032 }
5033 }
5034
5035 if (would_hit_hwbug) {
5036 u32 last_plus_one = entry;
5037 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038
Michael Chanc58ec932005-09-17 00:46:27 -07005039 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5040 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041
5042 /* If the workaround fails due to memory/mapping
5043 * failure, silently drop this packet.
5044 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005045 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005046 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 goto out_unlock;
5048
5049 entry = start;
5050 }
5051
5052 /* Packets are ready, update Tx producer idx local and on card. */
5053 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5054
5055 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005056 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005058 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005059 netif_wake_queue(tp->dev);
5060 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005061
5062out_unlock:
5063 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064
5065 dev->trans_start = jiffies;
5066
5067 return NETDEV_TX_OK;
5068}
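/*
 * Hardware-bug handling in the xmit path above: the linear head is
 * flagged when the part has TG3_FLG3_5701_DMA_BUG or when it trips
 * tg3_4g_overflow_test(), while each fragment is checked against
 * both the 4GB and the 40-bit limits.  Descriptors are still written
 * optimistically, and only if any piece was flagged is the whole
 * frame handed to tigon3_dma_hwbug_workaround(), which replaces the
 * just-queued descriptors with a single linearized copy (or silently
 * drops the frame if that fails).  For TSO frames the checksum setup
 * also differs by generation: HW_TSO parts want tcp->check zeroed
 * and TXD_FLAG_TCPUDP_CSUM cleared, older parts get the pseudo-header
 * checksum seeded here via csum_tcpudp_magic().
 */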
5069
5070static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5071 int new_mtu)
5072{
5073 dev->mtu = new_mtu;
5074
Michael Chanef7f5ec2005-07-25 12:32:25 -07005075 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005076 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005077 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5078 ethtool_op_set_tso(dev, 0);
5079 }
5080 else
5081 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5082 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005083 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005084 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005085 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005086 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087}
5088
5089static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5090{
5091 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005092 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093
5094 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5095 return -EINVAL;
5096
5097 if (!netif_running(dev)) {
5098 /* We'll just catch it later when the
5099 * device is up'd.
5100 */
5101 tg3_set_mtu(dev, tp, new_mtu);
5102 return 0;
5103 }
5104
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005105 tg3_phy_stop(tp);
5106
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005108
5109 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110
Michael Chan944d9802005-05-29 14:57:48 -07005111 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112
5113 tg3_set_mtu(dev, tp, new_mtu);
5114
Michael Chanb9ec6c12006-07-25 16:37:27 -07005115 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116
Michael Chanb9ec6c12006-07-25 16:37:27 -07005117 if (!err)
5118 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119
David S. Millerf47c11e2005-06-24 20:18:35 -07005120 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005122 if (!err)
5123 tg3_phy_start(tp);
5124
Michael Chanb9ec6c12006-07-25 16:37:27 -07005125 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126}
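/*
 * MTU changes on a running interface require a full reinit: the PHY
 * and netif queues are stopped, the chip is halted with
 * RESET_KIND_SHUTDOWN, the MTU/jumbo flags are updated and the
 * hardware is brought back via tg3_restart_hw().  tg3_set_mtu()
 * enables the jumbo RX ring only for new_mtu > ETH_DATA_LEN on
 * non-5780-class parts; 5780-class parts instead trade TSO for jumbo
 * frames (TG3_FLG2_TSO_CAPABLE is cleared and TSO switched off
 * through ethtool_op_set_tso()).  If the device is down, only the
 * MTU and flags are recorded and the new size takes effect at the
 * next open.
 */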
5127
5128/* Free up pending packets in all rx/tx rings.
5129 *
5130 * The chip has been shut down and the driver detached from
5131 * the networking, so no interrupts or new tx packets will
5132 * end up in the driver. tp->{tx,}lock is not held and we are not
5133 * in an interrupt context and thus may sleep.
5134 */
5135static void tg3_free_rings(struct tg3 *tp)
5136{
5137 struct ring_info *rxp;
5138 int i;
5139
5140 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5141 rxp = &tp->rx_std_buffers[i];
5142
5143 if (rxp->skb == NULL)
5144 continue;
5145 pci_unmap_single(tp->pdev,
5146 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005147 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 PCI_DMA_FROMDEVICE);
5149 dev_kfree_skb_any(rxp->skb);
5150 rxp->skb = NULL;
5151 }
5152
5153 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5154 rxp = &tp->rx_jumbo_buffers[i];
5155
5156 if (rxp->skb == NULL)
5157 continue;
5158 pci_unmap_single(tp->pdev,
5159 pci_unmap_addr(rxp, mapping),
5160 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5161 PCI_DMA_FROMDEVICE);
5162 dev_kfree_skb_any(rxp->skb);
5163 rxp->skb = NULL;
5164 }
5165
5166 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5167 struct tx_ring_info *txp;
5168 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169
5170 txp = &tp->tx_buffers[i];
5171 skb = txp->skb;
5172
5173 if (skb == NULL) {
5174 i++;
5175 continue;
5176 }
5177
David S. Miller90079ce2008-09-11 04:52:51 -07005178 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5179
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 txp->skb = NULL;
5181
David S. Miller90079ce2008-09-11 04:52:51 -07005182 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183
5184 dev_kfree_skb_any(skb);
5185 }
5186}
5187
5188/* Initialize tx/rx rings for packet processing.
5189 *
5190 * The chip has been shut down and the driver detached from
 5191 * the networking layer, so no interrupts or new tx packets will
5192 * end up in the driver. tp->{tx,}lock are held and thus
5193 * we may not sleep.
5194 */
Michael Chan32d8c572006-07-25 16:38:29 -07005195static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005196{
5197 u32 i;
5198
5199 /* Free up all the SKBs. */
5200 tg3_free_rings(tp);
5201
5202 /* Zero out all descriptors. */
5203 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5204 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5205 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5206 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5207
Michael Chan7e72aad2005-07-25 12:31:17 -07005208 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005209 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005210 (tp->dev->mtu > ETH_DATA_LEN))
5211 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5212
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213 /* Initialize invariants of the rings; we only set this
5214 * stuff once. This works because the card does not
5215 * write into the rx buffer posting rings.
5216 */
5217 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5218 struct tg3_rx_buffer_desc *rxd;
5219
5220 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005221 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222 << RXD_LEN_SHIFT;
5223 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5224 rxd->opaque = (RXD_OPAQUE_RING_STD |
5225 (i << RXD_OPAQUE_INDEX_SHIFT));
5226 }
5227
Michael Chan0f893dc2005-07-25 12:30:38 -07005228 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5230 struct tg3_rx_buffer_desc *rxd;
5231
5232 rxd = &tp->rx_jumbo[i];
5233 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5234 << RXD_LEN_SHIFT;
5235 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5236 RXD_FLAG_JUMBO;
5237 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5238 (i << RXD_OPAQUE_INDEX_SHIFT));
5239 }
5240 }
5241
5242 /* Now allocate fresh SKBs for each rx ring. */
5243 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005244 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5245 printk(KERN_WARNING PFX
5246 "%s: Using a smaller RX standard ring, "
5247 "only %d out of %d buffers were allocated "
5248 "successfully.\n",
5249 tp->dev->name, i, tp->rx_pending);
5250 if (i == 0)
5251 return -ENOMEM;
5252 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005254 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255 }
5256
Michael Chan0f893dc2005-07-25 12:30:38 -07005257 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5259 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005260 -1, i) < 0) {
5261 printk(KERN_WARNING PFX
5262 "%s: Using a smaller RX jumbo ring, "
5263 "only %d out of %d buffers were "
5264 "allocated successfully.\n",
5265 tp->dev->name, i, tp->rx_jumbo_pending);
5266 if (i == 0) {
5267 tg3_free_rings(tp);
5268 return -ENOMEM;
5269 }
5270 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005272 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 }
5274 }
Michael Chan32d8c572006-07-25 16:38:29 -07005275 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276}
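/*
 * Ring (re)initialization above: the descriptor memory is zeroed and
 * the invariant fields of the standard and jumbo RX descriptors
 * (length, END/JUMBO type flags, and the opaque ring-tag + index
 * cookie) are written once, which is safe because the hardware never
 * writes into the producer rings.  Buffer allocation failures are
 * tolerated by shrinking tp->rx_pending / tp->rx_jumbo_pending to
 * what was actually allocated; -ENOMEM is returned only when not a
 * single standard buffer could be posted, or when the very first
 * jumbo buffer fails (in which case everything is freed again).
 */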
5277
5278/*
5279 * Must not be invoked with interrupt sources disabled and
 5280 * the hardware shut down.
5281 */
5282static void tg3_free_consistent(struct tg3 *tp)
5283{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005284 kfree(tp->rx_std_buffers);
5285 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286 if (tp->rx_std) {
5287 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5288 tp->rx_std, tp->rx_std_mapping);
5289 tp->rx_std = NULL;
5290 }
5291 if (tp->rx_jumbo) {
5292 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5293 tp->rx_jumbo, tp->rx_jumbo_mapping);
5294 tp->rx_jumbo = NULL;
5295 }
5296 if (tp->rx_rcb) {
5297 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5298 tp->rx_rcb, tp->rx_rcb_mapping);
5299 tp->rx_rcb = NULL;
5300 }
5301 if (tp->tx_ring) {
5302 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5303 tp->tx_ring, tp->tx_desc_mapping);
5304 tp->tx_ring = NULL;
5305 }
5306 if (tp->hw_status) {
5307 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5308 tp->hw_status, tp->status_mapping);
5309 tp->hw_status = NULL;
5310 }
5311 if (tp->hw_stats) {
5312 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5313 tp->hw_stats, tp->stats_mapping);
5314 tp->hw_stats = NULL;
5315 }
5316}
5317
5318/*
5319 * Must not be invoked with interrupt sources disabled and
 5320 * the hardware shut down. Can sleep.
5321 */
5322static int tg3_alloc_consistent(struct tg3 *tp)
5323{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005324 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325 (TG3_RX_RING_SIZE +
5326 TG3_RX_JUMBO_RING_SIZE)) +
5327 (sizeof(struct tx_ring_info) *
5328 TG3_TX_RING_SIZE),
5329 GFP_KERNEL);
5330 if (!tp->rx_std_buffers)
5331 return -ENOMEM;
5332
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5334 tp->tx_buffers = (struct tx_ring_info *)
5335 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5336
5337 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5338 &tp->rx_std_mapping);
5339 if (!tp->rx_std)
5340 goto err_out;
5341
5342 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5343 &tp->rx_jumbo_mapping);
5344
5345 if (!tp->rx_jumbo)
5346 goto err_out;
5347
5348 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5349 &tp->rx_rcb_mapping);
5350 if (!tp->rx_rcb)
5351 goto err_out;
5352
5353 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5354 &tp->tx_desc_mapping);
5355 if (!tp->tx_ring)
5356 goto err_out;
5357
5358 tp->hw_status = pci_alloc_consistent(tp->pdev,
5359 TG3_HW_STATUS_SIZE,
5360 &tp->status_mapping);
5361 if (!tp->hw_status)
5362 goto err_out;
5363
5364 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5365 sizeof(struct tg3_hw_stats),
5366 &tp->stats_mapping);
5367 if (!tp->hw_stats)
5368 goto err_out;
5369
5370 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5371 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5372
5373 return 0;
5374
5375err_out:
5376 tg3_free_consistent(tp);
5377 return -ENOMEM;
5378}
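/*
 * Memory layout used above: a single kzalloc() holds the standard,
 * jumbo and TX ring_info arrays back to back (rx_jumbo_buffers and
 * tx_buffers are just offsets into it), while the descriptor rings,
 * the status block and the statistics block each get their own
 * coherent DMA allocation from pci_alloc_consistent().  Any failure
 * unwinds through tg3_free_consistent(), which checks every pointer
 * individually and therefore copes with a partially built state.
 */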
5379
5380#define MAX_WAIT_CNT 1000
5381
5382/* To stop a block, clear the enable bit and poll till it
5383 * clears. tp->lock is held.
5384 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005385static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005386{
5387 unsigned int i;
5388 u32 val;
5389
5390 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5391 switch (ofs) {
5392 case RCVLSC_MODE:
5393 case DMAC_MODE:
5394 case MBFREE_MODE:
5395 case BUFMGR_MODE:
5396 case MEMARB_MODE:
5397 /* We can't enable/disable these bits of the
5398 * 5705/5750, just say success.
5399 */
5400 return 0;
5401
5402 default:
5403 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005405 }
5406
5407 val = tr32(ofs);
5408 val &= ~enable_bit;
5409 tw32_f(ofs, val);
5410
5411 for (i = 0; i < MAX_WAIT_CNT; i++) {
5412 udelay(100);
5413 val = tr32(ofs);
5414 if ((val & enable_bit) == 0)
5415 break;
5416 }
5417
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005418 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5420 "ofs=%lx enable_bit=%x\n",
5421 ofs, enable_bit);
5422 return -ENODEV;
5423 }
5424
5425 return 0;
5426}
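/*
 * tg3_stop_block() clears the enable bit and then polls every 100us
 * for up to MAX_WAIT_CNT (1000) iterations, i.e. roughly 100ms per
 * block.  On 5705-plus parts the RCVLSC/DMAC/MBFREE/BUFMGR/MEMARB
 * blocks cannot be toggled at all, so those return success right
 * away.  A timeout is reported and turned into -ENODEV only when
 * 'silent' is not set; with 'silent' the timeout is ignored and the
 * function still returns 0.
 */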
5427
5428/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005429static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430{
5431 int i, err;
5432
5433 tg3_disable_ints(tp);
5434
5435 tp->rx_mode &= ~RX_MODE_ENABLE;
5436 tw32_f(MAC_RX_MODE, tp->rx_mode);
5437 udelay(10);
5438
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005439 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5440 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5441 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5442 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5443 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5444 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005446 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5447 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5448 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5449 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5450 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5451 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5452 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453
5454 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5455 tw32_f(MAC_MODE, tp->mac_mode);
5456 udelay(40);
5457
5458 tp->tx_mode &= ~TX_MODE_ENABLE;
5459 tw32_f(MAC_TX_MODE, tp->tx_mode);
5460
5461 for (i = 0; i < MAX_WAIT_CNT; i++) {
5462 udelay(100);
5463 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5464 break;
5465 }
5466 if (i >= MAX_WAIT_CNT) {
5467 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5468 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5469 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005470 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471 }
5472
Michael Chane6de8ad2005-05-05 14:42:41 -07005473 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005474 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5475 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476
5477 tw32(FTQ_RESET, 0xffffffff);
5478 tw32(FTQ_RESET, 0x00000000);
5479
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005480 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5481 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482
5483 if (tp->hw_status)
5484 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5485 if (tp->hw_stats)
5486 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5487
Linus Torvalds1da177e2005-04-16 15:20:36 -07005488 return err;
5489}
5490
5491/* tp->lock is held. */
5492static int tg3_nvram_lock(struct tg3 *tp)
5493{
5494 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5495 int i;
5496
Michael Chanec41c7d2006-01-17 02:40:55 -08005497 if (tp->nvram_lock_cnt == 0) {
5498 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5499 for (i = 0; i < 8000; i++) {
5500 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5501 break;
5502 udelay(20);
5503 }
5504 if (i == 8000) {
5505 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5506 return -ENODEV;
5507 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005508 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005509 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005510 }
5511 return 0;
5512}
5513
5514/* tp->lock is held. */
5515static void tg3_nvram_unlock(struct tg3 *tp)
5516{
Michael Chanec41c7d2006-01-17 02:40:55 -08005517 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5518 if (tp->nvram_lock_cnt > 0)
5519 tp->nvram_lock_cnt--;
5520 if (tp->nvram_lock_cnt == 0)
5521 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523}
5524
5525/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005526static void tg3_enable_nvram_access(struct tg3 *tp)
5527{
5528 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5529 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5530 u32 nvaccess = tr32(NVRAM_ACCESS);
5531
5532 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5533 }
5534}
5535
5536/* tp->lock is held. */
5537static void tg3_disable_nvram_access(struct tg3 *tp)
5538{
5539 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5540 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5541 u32 nvaccess = tr32(NVRAM_ACCESS);
5542
5543 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5544 }
5545}
5546
Matt Carlson0d3031d2007-10-10 18:02:43 -07005547static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5548{
5549 int i;
5550 u32 apedata;
5551
5552 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5553 if (apedata != APE_SEG_SIG_MAGIC)
5554 return;
5555
5556 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005557 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005558 return;
5559
5560 /* Wait for up to 1 millisecond for APE to service previous event. */
5561 for (i = 0; i < 10; i++) {
5562 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5563 return;
5564
5565 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5566
5567 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5568 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5569 event | APE_EVENT_STATUS_EVENT_PENDING);
5570
5571 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5572
5573 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5574 break;
5575
5576 udelay(100);
5577 }
5578
5579 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5580 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5581}
5582
5583static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5584{
5585 u32 event;
5586 u32 apedata;
5587
5588 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5589 return;
5590
5591 switch (kind) {
5592 case RESET_KIND_INIT:
5593 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5594 APE_HOST_SEG_SIG_MAGIC);
5595 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5596 APE_HOST_SEG_LEN_MAGIC);
5597 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5598 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5599 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5600 APE_HOST_DRIVER_ID_MAGIC);
5601 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5602 APE_HOST_BEHAV_NO_PHYLOCK);
5603
5604 event = APE_EVENT_STATUS_STATE_START;
5605 break;
5606 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005607 /* With the interface we are currently using,
5608 * APE does not track driver state. Wiping
5609 * out the HOST SEGMENT SIGNATURE forces
5610 * the APE to assume OS absent status.
5611 */
5612 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5613
Matt Carlson0d3031d2007-10-10 18:02:43 -07005614 event = APE_EVENT_STATUS_STATE_UNLOAD;
5615 break;
5616 case RESET_KIND_SUSPEND:
5617 event = APE_EVENT_STATUS_STATE_SUSPEND;
5618 break;
5619 default:
5620 return;
5621 }
5622
5623 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5624
5625 tg3_ape_send_event(tp, event);
5626}
5627
Michael Chane6af3012005-04-21 17:12:05 -07005628/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5630{
David S. Millerf49639e2006-06-09 11:58:36 -07005631 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5632 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633
5634 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5635 switch (kind) {
5636 case RESET_KIND_INIT:
5637 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5638 DRV_STATE_START);
5639 break;
5640
5641 case RESET_KIND_SHUTDOWN:
5642 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5643 DRV_STATE_UNLOAD);
5644 break;
5645
5646 case RESET_KIND_SUSPEND:
5647 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5648 DRV_STATE_SUSPEND);
5649 break;
5650
5651 default:
5652 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005653 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005655
5656 if (kind == RESET_KIND_INIT ||
5657 kind == RESET_KIND_SUSPEND)
5658 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659}
5660
5661/* tp->lock is held. */
5662static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5663{
5664 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5665 switch (kind) {
5666 case RESET_KIND_INIT:
5667 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5668 DRV_STATE_START_DONE);
5669 break;
5670
5671 case RESET_KIND_SHUTDOWN:
5672 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5673 DRV_STATE_UNLOAD_DONE);
5674 break;
5675
5676 default:
5677 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005678 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005679 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005680
5681 if (kind == RESET_KIND_SHUTDOWN)
5682 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005683}
5684
5685/* tp->lock is held. */
5686static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5687{
5688 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5689 switch (kind) {
5690 case RESET_KIND_INIT:
5691 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5692 DRV_STATE_START);
5693 break;
5694
5695 case RESET_KIND_SHUTDOWN:
5696 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5697 DRV_STATE_UNLOAD);
5698 break;
5699
5700 case RESET_KIND_SUSPEND:
5701 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5702 DRV_STATE_SUSPEND);
5703 break;
5704
5705 default:
5706 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005707 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005708 }
5709}
5710
Michael Chan7a6f4362006-09-27 16:03:31 -07005711static int tg3_poll_fw(struct tg3 *tp)
5712{
5713 int i;
5714 u32 val;
5715
Michael Chanb5d37722006-09-27 16:06:21 -07005716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005717 /* Wait up to 20ms for init done. */
5718 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005719 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5720 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005721 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005722 }
5723 return -ENODEV;
5724 }
5725
Michael Chan7a6f4362006-09-27 16:03:31 -07005726 /* Wait for firmware initialization to complete. */
5727 for (i = 0; i < 100000; i++) {
5728 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5729 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5730 break;
5731 udelay(10);
5732 }
5733
5734 /* Chip might not be fitted with firmware. Some Sun onboard
5735 * parts are configured like that. So don't signal the timeout
5736 * of the above loop as an error, but do report the lack of
5737 * running firmware once.
5738 */
5739 if (i >= 100000 &&
5740 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5741 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5742
5743 printk(KERN_INFO PFX "%s: No firmware running.\n",
5744 tp->dev->name);
5745 }
5746
5747 return 0;
5748}
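/*
 * Timing of the firmware poll above: 5906 parts wait up to 20ms
 * (200 x 100us) for VCPU_STATUS_INIT_DONE and treat a timeout as
 * -ENODEV.  All other parts poll NIC_SRAM_FIRMWARE_MBOX for up to
 * about one second (100000 x 10us), waiting for the bootcode to
 * write back the one's complement of MAGIC1.  A timeout there is
 * deliberately not an error, since some Sun onboard parts ship
 * without firmware; it is merely logged once, gated by
 * TG3_FLG2_NO_FWARE_REPORTED.
 */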
5749
Michael Chanee6a99b2007-07-18 21:49:10 -07005750/* Save PCI command register before chip reset */
5751static void tg3_save_pci_state(struct tg3 *tp)
5752{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005753 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005754}
5755
5756/* Restore PCI state after chip reset */
5757static void tg3_restore_pci_state(struct tg3 *tp)
5758{
5759 u32 val;
5760
5761 /* Re-enable indirect register accesses. */
5762 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5763 tp->misc_host_ctrl);
5764
5765 /* Set MAX PCI retry to zero. */
5766 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5767 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5768 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5769 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005770 /* Allow reads and writes to the APE register and memory space. */
5771 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5772 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5773 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005774 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5775
Matt Carlson8a6eac92007-10-21 16:17:55 -07005776 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005777
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005778 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5779 pcie_set_readrq(tp->pdev, 4096);
5780 else {
Michael Chan114342f2007-10-15 02:12:26 -07005781 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5782 tp->pci_cacheline_sz);
5783 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5784 tp->pci_lat_timer);
5785 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005786
Michael Chanee6a99b2007-07-18 21:49:10 -07005787 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005788 if (tp->pcix_cap) {
5789 u16 pcix_cmd;
5790
5791 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5792 &pcix_cmd);
5793 pcix_cmd &= ~PCI_X_CMD_ERO;
5794 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5795 pcix_cmd);
5796 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005797
5798 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005799
5800 /* Chip reset on 5780 will reset MSI enable bit,
5801 * so need to restore it.
5802 */
5803 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5804 u16 ctrl;
5805
5806 pci_read_config_word(tp->pdev,
5807 tp->msi_cap + PCI_MSI_FLAGS,
5808 &ctrl);
5809 pci_write_config_word(tp->pdev,
5810 tp->msi_cap + PCI_MSI_FLAGS,
5811 ctrl | PCI_MSI_FLAGS_ENABLE);
5812 val = tr32(MSGINT_MODE);
5813 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5814 }
5815 }
5816}
5817
Linus Torvalds1da177e2005-04-16 15:20:36 -07005818static void tg3_stop_fw(struct tg3 *);
5819
5820/* tp->lock is held. */
5821static int tg3_chip_reset(struct tg3 *tp)
5822{
5823 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005824 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005825 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826
David S. Millerf49639e2006-06-09 11:58:36 -07005827 tg3_nvram_lock(tp);
5828
Matt Carlson158d7ab2008-05-29 01:37:54 -07005829 tg3_mdio_stop(tp);
5830
Matt Carlson77b483f2008-08-15 14:07:24 -07005831 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5832
David S. Millerf49639e2006-06-09 11:58:36 -07005833 /* No matching tg3_nvram_unlock() after this because
5834 * chip reset below will undo the nvram lock.
5835 */
5836 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005837
Michael Chanee6a99b2007-07-18 21:49:10 -07005838 /* GRC_MISC_CFG core clock reset will clear the memory
5839 * enable bit in PCI register 4 and the MSI enable bit
5840 * on some chips, so we save relevant registers here.
5841 */
5842 tg3_save_pci_state(tp);
5843
Michael Chand9ab5ad2006-03-20 22:27:35 -08005844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005850 tw32(GRC_FASTBOOT_PC, 0);
5851
Linus Torvalds1da177e2005-04-16 15:20:36 -07005852 /*
5853 * We must avoid the readl() that normally takes place.
5854 * It locks machines, causes machine checks, and other
5855 * fun things. So, temporarily disable the 5701
5856 * hardware workaround, while we do the reset.
5857 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005858 write_op = tp->write32;
5859 if (write_op == tg3_write_flush_reg32)
5860 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005861
Michael Chand18edcb2007-03-24 20:57:11 -07005862 /* Prevent the irq handler from reading or writing PCI registers
5863 * during chip reset when the memory enable bit in the PCI command
5864 * register may be cleared. The chip does not generate interrupt
5865 * at this time, but the irq handler may still be called due to irq
5866 * sharing or irqpoll.
5867 */
5868 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005869 if (tp->hw_status) {
5870 tp->hw_status->status = 0;
5871 tp->hw_status->status_tag = 0;
5872 }
Michael Chand18edcb2007-03-24 20:57:11 -07005873 tp->last_tag = 0;
5874 smp_mb();
5875 synchronize_irq(tp->pdev->irq);
5876
Linus Torvalds1da177e2005-04-16 15:20:36 -07005877 /* do the reset */
5878 val = GRC_MISC_CFG_CORECLK_RESET;
5879
5880 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5881 if (tr32(0x7e2c) == 0x60) {
5882 tw32(0x7e2c, 0x20);
5883 }
5884 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5885 tw32(GRC_MISC_CFG, (1 << 29));
5886 val |= (1 << 29);
5887 }
5888 }
5889
Michael Chanb5d37722006-09-27 16:06:21 -07005890 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5891 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5892 tw32(GRC_VCPU_EXT_CTRL,
5893 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5894 }
5895
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5897 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5898 tw32(GRC_MISC_CFG, val);
5899
Michael Chan1ee582d2005-08-09 20:16:46 -07005900 /* restore 5701 hardware bug workaround write method */
5901 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005902
5903 /* Unfortunately, we have to delay before the PCI read back.
 5904 * Some 575X chips will not even respond to a PCI cfg access
5905 * when the reset command is given to the chip.
5906 *
5907 * How do these hardware designers expect things to work
5908 * properly if the PCI write is posted for a long period
5909 * of time? It is always necessary to have some method by
5910 * which a register read back can occur to push the write
5911 * out which does the reset.
5912 *
5913 * For most tg3 variants the trick below was working.
5914 * Ho hum...
5915 */
5916 udelay(120);
5917
5918 /* Flush PCI posted writes. The normal MMIO registers
5919 * are inaccessible at this time so this is the only
 5920 * way to do this reliably (actually, this is no longer
5921 * the case, see above). I tried to use indirect
5922 * register read/write but this upset some 5701 variants.
5923 */
5924 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5925
5926 udelay(120);
5927
5928 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5929 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5930 int i;
5931 u32 cfg_val;
5932
5933 /* Wait for link training to complete. */
5934 for (i = 0; i < 5000; i++)
5935 udelay(100);
5936
5937 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5938 pci_write_config_dword(tp->pdev, 0xc4,
5939 cfg_val | (1 << 15));
5940 }
5941 /* Set PCIE max payload size and clear error status. */
5942 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5943 }
5944
Michael Chanee6a99b2007-07-18 21:49:10 -07005945 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005946
Michael Chand18edcb2007-03-24 20:57:11 -07005947 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5948
Michael Chanee6a99b2007-07-18 21:49:10 -07005949 val = 0;
5950 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07005951 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07005952 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005953
5954 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5955 tg3_stop_fw(tp);
5956 tw32(0x5000, 0x400);
5957 }
5958
5959 tw32(GRC_MODE, tp->grc_mode);
5960
5961 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005962 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963
5964 tw32(0xc4, val | (1 << 15));
5965 }
5966
5967 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5969 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5970 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5971 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5972 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5973 }
5974
5975 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5976 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5977 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07005978 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5979 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5980 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07005981 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5982 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5983 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5984 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5985 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005986 } else
5987 tw32_f(MAC_MODE, 0);
5988 udelay(40);
5989
Matt Carlson158d7ab2008-05-29 01:37:54 -07005990 tg3_mdio_start(tp);
5991
Matt Carlson77b483f2008-08-15 14:07:24 -07005992 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5993
Michael Chan7a6f4362006-09-27 16:03:31 -07005994 err = tg3_poll_fw(tp);
5995 if (err)
5996 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005997
5998 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5999 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006000 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006001
6002 tw32(0x7c00, val | (1 << 25));
6003 }
6004
6005 /* Reprobe ASF enable state. */
6006 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6007 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6008 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6009 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6010 u32 nic_cfg;
6011
6012 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6013 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6014 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006015 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006016 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006017 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6018 }
6019 }
6020
6021 return 0;
6022}
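/*
 * Sequencing notes for tg3_chip_reset(): PCI state is saved first
 * because the GRC_MISC_CFG core-clock reset wipes the memory-enable
 * and (on some chips) MSI-enable bits, and the NVRAM lock count is
 * zeroed because the reset also clears the hardware arbitration.
 * tp->write32 is temporarily switched from the flushing variant to
 * plain tg3_write32 so the reset write is not followed by the 5701
 * read-back workaround, and TG3_FLAG_CHIP_RESETTING plus a cleared
 * status block keep a shared or irqpoll'd interrupt handler from
 * touching the device while PCI memory space may be disabled.
 * Afterwards a 120us delay and a PCI config read stand in for the
 * unusable MMIO read-back to flush the posted reset write, PCI state
 * is restored, the firmware is polled via tg3_poll_fw(), and the ASF
 * configuration is re-probed from NVRAM.
 */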
6023
6024/* tp->lock is held. */
6025static void tg3_stop_fw(struct tg3 *tp)
6026{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006027 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6028 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006029 /* Wait for RX cpu to ACK the previous event. */
6030 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006031
6032 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006033
6034 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006035
Matt Carlson7c5026a2008-05-02 16:49:29 -07006036 /* Wait for RX cpu to ACK this event. */
6037 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006038 }
6039}
6040
6041/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006042static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006043{
6044 int err;
6045
6046 tg3_stop_fw(tp);
6047
Michael Chan944d9802005-05-29 14:57:48 -07006048 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006049
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006050 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006051 err = tg3_chip_reset(tp);
6052
Michael Chan944d9802005-05-29 14:57:48 -07006053 tg3_write_sig_legacy(tp, kind);
6054 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006055
6056 if (err)
6057 return err;
6058
6059 return 0;
6060}
6061
6062#define TG3_FW_RELEASE_MAJOR 0x0
6063#define TG3_FW_RELASE_MINOR 0x0
6064#define TG3_FW_RELEASE_FIX 0x0
6065#define TG3_FW_START_ADDR 0x08000000
6066#define TG3_FW_TEXT_ADDR 0x08000000
6067#define TG3_FW_TEXT_LEN 0x9c0
6068#define TG3_FW_RODATA_ADDR 0x080009c0
6069#define TG3_FW_RODATA_LEN 0x60
6070#define TG3_FW_DATA_ADDR 0x08000a40
6071#define TG3_FW_DATA_LEN 0x20
6072#define TG3_FW_SBSS_ADDR 0x08000a60
6073#define TG3_FW_SBSS_LEN 0xc
6074#define TG3_FW_BSS_ADDR 0x08000a70
6075#define TG3_FW_BSS_LEN 0x10
6076
Andreas Mohr50da8592006-08-14 23:54:30 -07006077static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006078 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6079 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6080 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6081 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6082 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6083 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6084 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6085 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6086 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6087 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6088 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6089 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6090 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6091 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6092 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6093 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6094 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6095 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6096 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6097 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6098 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6099 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6100 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6103 0, 0, 0, 0, 0, 0,
6104 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6105 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6106 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6107 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6108 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6109 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6110 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6111 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6112 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6113 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6114 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6117 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6118 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6119 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6120 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6121 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6122 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6123 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6124 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6125 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6126 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6127 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6128 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6129 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6130 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6131 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6132 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6133 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6134 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6135 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6136 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6137 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6138 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6139 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6140 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6141 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6142 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6143 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6144 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6145 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6146 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6147 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6148 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6149 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6150 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6151 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6152 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6153 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6154 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6155 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6156 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6157 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6158 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6159 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6160 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6161 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6162 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6163 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6164 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6165 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6166 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6167 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6168 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6169};
6170
Andreas Mohr50da8592006-08-14 23:54:30 -07006171static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006172 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6173 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6174 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6175 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6176 0x00000000
6177};
6178
6179#if 0 /* All zeros, don't eat up space with it. */
6180u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6181 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6182 0x00000000, 0x00000000, 0x00000000, 0x00000000
6183};
6184#endif
6185
6186#define RX_CPU_SCRATCH_BASE 0x30000
6187#define RX_CPU_SCRATCH_SIZE 0x04000
6188#define TX_CPU_SCRATCH_BASE 0x34000
6189#define TX_CPU_SCRATCH_SIZE 0x04000
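/*
 * Each on-chip CPU gets a 16 kB (0x4000 byte) scratch window at the offsets
 * above; the firmware sections are copied there while the CPU is held in
 * halt, before its PC is pointed at the image entry.
 */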
6190
6191/* tp->lock is held. */
6192static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6193{
6194 int i;
6195
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006196 BUG_ON(offset == TX_CPU_BASE &&
6197 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198
Michael Chanb5d37722006-09-27 16:06:21 -07006199 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6200 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6201
6202 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6203 return 0;
6204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006205 if (offset == RX_CPU_BASE) {
6206 for (i = 0; i < 10000; i++) {
6207 tw32(offset + CPU_STATE, 0xffffffff);
6208 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6209 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6210 break;
6211 }
6212
6213 tw32(offset + CPU_STATE, 0xffffffff);
6214 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6215 udelay(10);
6216 } else {
6217 for (i = 0; i < 10000; i++) {
6218 tw32(offset + CPU_STATE, 0xffffffff);
6219 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6220 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6221 break;
6222 }
6223 }
6224
6225 if (i >= 10000) {
 6226		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
 6227		       "%s CPU\n",
6228 tp->dev->name,
6229 (offset == RX_CPU_BASE ? "RX" : "TX"));
6230 return -ENODEV;
6231 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006232
6233 /* Clear firmware's nvram arbitration. */
6234 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6235 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006236 return 0;
6237}
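/*
 * Halting differs by family, as the code above shows: the 5906's virtual CPU
 * is stopped through GRC_VCPU_EXT_CTRL, while everything else is asked to
 * halt by writing CPU_MODE_HALT and polling (up to 10000 iterations) for the
 * bit to stick.  Clearing NVRAM_SWARB at the end releases whatever
 * arbitration request the bootcode may still have had pending.
 */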
6238
6239struct fw_info {
6240 unsigned int text_base;
6241 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006242 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006243 unsigned int rodata_base;
6244 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006245 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006246 unsigned int data_base;
6247 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006248 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006249};
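/*
 * struct fw_info simply points at the three sections of one of the embedded
 * images above.  A NULL *_data pointer means "zero-fill that section" -- see
 * the ternaries in tg3_load_firmware_cpu() and the NULL data_data used for
 * the all-zero .data of the 5701 A0 image.
 */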
6250
6251/* tp->lock is held. */
6252static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6253 int cpu_scratch_size, struct fw_info *info)
6254{
Michael Chanec41c7d2006-01-17 02:40:55 -08006255 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006256 void (*write_op)(struct tg3 *, u32, u32);
6257
6258 if (cpu_base == TX_CPU_BASE &&
6259 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6260 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6261 "TX cpu firmware on %s which is 5705.\n",
6262 tp->dev->name);
6263 return -EINVAL;
6264 }
6265
6266 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6267 write_op = tg3_write_mem;
6268 else
6269 write_op = tg3_write_indirect_reg32;
6270
Michael Chan1b628152005-05-29 14:59:49 -07006271 /* It is possible that bootcode is still loading at this point.
 6272	 * Grab the nvram lock before halting the cpu.
6273 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006274 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006275 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006276 if (!lock_err)
6277 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006278 if (err)
6279 goto out;
6280
6281 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6282 write_op(tp, cpu_scratch_base + i, 0);
6283 tw32(cpu_base + CPU_STATE, 0xffffffff);
6284 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6285 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6286 write_op(tp, (cpu_scratch_base +
6287 (info->text_base & 0xffff) +
6288 (i * sizeof(u32))),
6289 (info->text_data ?
6290 info->text_data[i] : 0));
6291 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6292 write_op(tp, (cpu_scratch_base +
6293 (info->rodata_base & 0xffff) +
6294 (i * sizeof(u32))),
6295 (info->rodata_data ?
6296 info->rodata_data[i] : 0));
6297 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6298 write_op(tp, (cpu_scratch_base +
6299 (info->data_base & 0xffff) +
6300 (i * sizeof(u32))),
6301 (info->data_data ?
6302 info->data_data[i] : 0));
6303
6304 err = 0;
6305
6306out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006307 return err;
6308}
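/*
 * Load sequence, roughly: grab the NVRAM lock (bootcode may still be
 * running), halt the target CPU, zero its entire scratch area, then copy
 * text/rodata/data one word at a time, with each section base masked to its
 * low 16 bits as the scratch offset.  On 5705-and-later parts the copies go
 * through tg3_write_mem(); older chips use the indirect register-write path.
 */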
6309
6310/* tp->lock is held. */
6311static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6312{
6313 struct fw_info info;
6314 int err, i;
6315
6316 info.text_base = TG3_FW_TEXT_ADDR;
6317 info.text_len = TG3_FW_TEXT_LEN;
6318 info.text_data = &tg3FwText[0];
6319 info.rodata_base = TG3_FW_RODATA_ADDR;
6320 info.rodata_len = TG3_FW_RODATA_LEN;
6321 info.rodata_data = &tg3FwRodata[0];
6322 info.data_base = TG3_FW_DATA_ADDR;
6323 info.data_len = TG3_FW_DATA_LEN;
6324 info.data_data = NULL;
6325
6326 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6327 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6328 &info);
6329 if (err)
6330 return err;
6331
6332 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6333 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6334 &info);
6335 if (err)
6336 return err;
6337
6338 /* Now startup only the RX cpu. */
6339 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6340 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6341
6342 for (i = 0; i < 5; i++) {
6343 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6344 break;
6345 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6346 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6347 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6348 udelay(1000);
6349 }
6350 if (i >= 5) {
 6351		printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
 6352		       "to set RX CPU PC: is %08x, should be %08x\n",
6353 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6354 TG3_FW_TEXT_ADDR);
6355 return -ENODEV;
6356 }
6357 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6358 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6359
6360 return 0;
6361}
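/*
 * Only the RX CPU is restarted above: its PC is pointed at the image entry
 * and, if the value has not taken after five halt/re-write attempts, the
 * load is reported as failed.  The TX CPU is loaded but left halted here --
 * presumably the 5701 A0 workaround only needs the RX side running.
 */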
6362
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363
6364#define TG3_TSO_FW_RELEASE_MAJOR 0x1
 6365#define TG3_TSO_FW_RELEASE_MINOR	0x6
6366#define TG3_TSO_FW_RELEASE_FIX 0x0
6367#define TG3_TSO_FW_START_ADDR 0x08000000
6368#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6369#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6370#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6371#define TG3_TSO_FW_RODATA_LEN 0x60
6372#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6373#define TG3_TSO_FW_DATA_LEN 0x30
6374#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6375#define TG3_TSO_FW_SBSS_LEN 0x2c
6376#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6377#define TG3_TSO_FW_BSS_LEN 0x894
6378
Andreas Mohr50da8592006-08-14 23:54:30 -07006379static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006380 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6381 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6382 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6383 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6384 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6385 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6386 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6387 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6388 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6389 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6390 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6391 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6392 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6393 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6394 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6395 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6396 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6397 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6398 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6399 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6400 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6401 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6402 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6403 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6404 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6405 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6406 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6407 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6408 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6409 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6410 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6411 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6412 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6413 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6414 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6415 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6416 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6417 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6418 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6419 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6420 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6421 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6422 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6423 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6424 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6425 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6426 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6427 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6428 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6429 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6430 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6431 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6432 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6433 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6434 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6435 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6436 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6437 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6438 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6439 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6440 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6441 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6442 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6443 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6444 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6445 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6446 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6447 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6448 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6449 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6450 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6451 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6452 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6453 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6454 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6455 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6456 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6457 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6458 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6459 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6460 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6461 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6462 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6463 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6464 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6465 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6466 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6467 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6468 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6469 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6470 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6471 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6472 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6473 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6474 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6475 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6476 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6477 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6478 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6479 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6480 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6481 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6482 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6483 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6484 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6485 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6486 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6487 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6488 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6489 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6490 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6491 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6492 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6493 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6494 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6495 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6496 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6497 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6498 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6499 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6500 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6501 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6502 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6503 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6504 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6505 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6506 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6507 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6508 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6509 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6510 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6511 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6512 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6513 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6514 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6515 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6516 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6517 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6518 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6519 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6520 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6521 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6522 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6523 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6524 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6525 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6526 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6527 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6528 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6529 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6530 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6531 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6532 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6533 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6534 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6535 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6536 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6537 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6538 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6539 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6540 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6541 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6542 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6543 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6544 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6545 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6546 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6547 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6548 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6549 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6550 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6551 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6552 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6553 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6554 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6555 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6556 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6557 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6558 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6559 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6560 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6561 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6562 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6563 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6564 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6565 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6566 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6567 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6568 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6569 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6570 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6571 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6572 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6573 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6574 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6575 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6576 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6577 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6578 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6579 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6580 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6581 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6582 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6583 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6584 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6585 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6586 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6587 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6588 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6589 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6590 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6591 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6592 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6593 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6594 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6595 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6596 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6597 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6598 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6599 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6600 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6601 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6602 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6603 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6604 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6605 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6606 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6607 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6608 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6609 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6610 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6611 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6612 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6613 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6614 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6615 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6616 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6617 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6618 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6619 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6620 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6621 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6622 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6623 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6624 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6625 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6626 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6627 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6628 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6629 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6630 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6631 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6632 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6633 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6634 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6635 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6636 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6637 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6638 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6639 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6640 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6641 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6642 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6643 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6644 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6645 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6646 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6647 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6648 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6649 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6650 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6651 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6652 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6653 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6654 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6655 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6656 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6657 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6658 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6659 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6660 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6661 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6662 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6663 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6664};
6665
Andreas Mohr50da8592006-08-14 23:54:30 -07006666static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6668 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6669 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6670 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6671 0x00000000,
6672};
6673
Andreas Mohr50da8592006-08-14 23:54:30 -07006674static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006675 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6676 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6677 0x00000000,
6678};
6679
6680/* 5705 needs a special version of the TSO firmware. */
6681#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6682#define TG3_TSO5_FW_RELASE_MINOR 0x2
6683#define TG3_TSO5_FW_RELEASE_FIX 0x0
6684#define TG3_TSO5_FW_START_ADDR 0x00010000
6685#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6686#define TG3_TSO5_FW_TEXT_LEN 0xe90
6687#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6688#define TG3_TSO5_FW_RODATA_LEN 0x50
6689#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6690#define TG3_TSO5_FW_DATA_LEN 0x20
6691#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6692#define TG3_TSO5_FW_SBSS_LEN 0x28
6693#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6694#define TG3_TSO5_FW_BSS_LEN 0x88
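/*
 * The 5705 variant above is smaller and linked at 0x00010000 instead of
 * 0x08000000; since the loader only uses the address modulo 64K, both images
 * end up at offset 0 of whatever scratch region they are given.
 */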
6695
Andreas Mohr50da8592006-08-14 23:54:30 -07006696static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006697 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6698 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6699 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6700 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6701 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6702 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6703 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6704 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6705 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6706 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6707 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6708 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6709 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6710 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6711 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6712 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6713 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6714 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6715 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6716 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6717 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6718 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6719 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6720 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6721 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6722 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6723 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6724 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6725 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6726 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6727 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6728 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6729 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6730 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6731 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6732 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6733 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6734 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6735 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6736 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6737 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6738 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6739 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6740 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6741 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6742 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6743 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6744 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6745 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6746 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6747 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6748 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6749 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6750 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6751 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6752 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6753 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6754 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6755 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6756 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6757 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6758 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6759 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6760 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6761 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6762 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6763 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6764 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6765 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6766 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6767 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6768 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6769 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6770 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6771 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6772 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6773 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6774 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6775 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6776 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6777 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6778 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6779 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6780 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6781 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6782 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6783 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6784 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6785 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6786 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6787 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6788 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6789 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6790 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6791 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6792 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6793 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6794 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6795 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6796 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6797 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6798 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6799 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6800 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6801 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6802 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6803 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6804 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6805 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6806 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6807 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6808 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6809 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6810 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6811 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6812 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6813 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6814 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6815 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6816 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6817 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6818 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6819 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6820 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6821 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6822 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6823 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6824 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6825 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6826 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6827 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6828 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6829 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6830 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6831 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6832 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6833 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6834 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6835 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6836 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6837 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6838 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6839 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6840 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6841 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6842 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6843 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6844 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6845 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6846 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6847 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6848 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6849 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6850 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6851 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6852 0x00000000, 0x00000000, 0x00000000,
6853};
6854
Andreas Mohr50da8592006-08-14 23:54:30 -07006855static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006856 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6857 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6858 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6859 0x00000000, 0x00000000, 0x00000000,
6860};
6861
Andreas Mohr50da8592006-08-14 23:54:30 -07006862static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6864 0x00000000, 0x00000000, 0x00000000,
6865};
6866
6867/* tp->lock is held. */
6868static int tg3_load_tso_firmware(struct tg3 *tp)
6869{
6870 struct fw_info info;
6871 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6872 int err, i;
6873
6874 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6875 return 0;
6876
6877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6878 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6879 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6880 info.text_data = &tg3Tso5FwText[0];
6881 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6882 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6883 info.rodata_data = &tg3Tso5FwRodata[0];
6884 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6885 info.data_len = TG3_TSO5_FW_DATA_LEN;
6886 info.data_data = &tg3Tso5FwData[0];
6887 cpu_base = RX_CPU_BASE;
6888 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6889 cpu_scratch_size = (info.text_len +
6890 info.rodata_len +
6891 info.data_len +
6892 TG3_TSO5_FW_SBSS_LEN +
6893 TG3_TSO5_FW_BSS_LEN);
6894 } else {
6895 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6896 info.text_len = TG3_TSO_FW_TEXT_LEN;
6897 info.text_data = &tg3TsoFwText[0];
6898 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6899 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6900 info.rodata_data = &tg3TsoFwRodata[0];
6901 info.data_base = TG3_TSO_FW_DATA_ADDR;
6902 info.data_len = TG3_TSO_FW_DATA_LEN;
6903 info.data_data = &tg3TsoFwData[0];
6904 cpu_base = TX_CPU_BASE;
6905 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6906 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6907 }
6908
6909 err = tg3_load_firmware_cpu(tp, cpu_base,
6910 cpu_scratch_base, cpu_scratch_size,
6911 &info);
6912 if (err)
6913 return err;
6914
6915 /* Now startup the cpu. */
6916 tw32(cpu_base + CPU_STATE, 0xffffffff);
6917 tw32_f(cpu_base + CPU_PC, info.text_base);
6918
6919 for (i = 0; i < 5; i++) {
6920 if (tr32(cpu_base + CPU_PC) == info.text_base)
6921 break;
6922 tw32(cpu_base + CPU_STATE, 0xffffffff);
6923 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6924 tw32_f(cpu_base + CPU_PC, info.text_base);
6925 udelay(1000);
6926 }
6927 if (i >= 5) {
 6928		printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
 6929		       "to set CPU PC: is %08x, should be %08x\n",
6930 tp->dev->name, tr32(cpu_base + CPU_PC),
6931 info.text_base);
6932 return -ENODEV;
6933 }
6934 tw32(cpu_base + CPU_STATE, 0xffffffff);
6935 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6936 return 0;
6937}
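/*
 * Firmware selection in tg3_load_tso_firmware(): chips with hardware TSO
 * need no image at all; the 5705 gets the TSO5 image run on its RX CPU,
 * borrowing part of the NIC MBUF pool as scratch (an inference from
 * NIC_SRAM_MBUF_POOL_BASE5705, not a datasheet statement); everything else
 * loads the full image into the TX CPU scratch area.  The start/poll loop
 * mirrors the one in tg3_load_5701_a0_firmware_fix().
 */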
6938
Linus Torvalds1da177e2005-04-16 15:20:36 -07006939
Linus Torvalds1da177e2005-04-16 15:20:36 -07006940static int tg3_set_mac_addr(struct net_device *dev, void *p)
6941{
6942 struct tg3 *tp = netdev_priv(dev);
6943 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07006944 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006945
Michael Chanf9804dd2005-09-27 12:13:10 -07006946 if (!is_valid_ether_addr(addr->sa_data))
6947 return -EINVAL;
6948
Linus Torvalds1da177e2005-04-16 15:20:36 -07006949 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6950
Michael Chane75f7c92006-03-20 21:33:26 -08006951 if (!netif_running(dev))
6952 return 0;
6953
Michael Chan58712ef2006-04-29 18:58:01 -07006954 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006955 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07006956
Michael Chan986e0ae2007-05-05 12:10:20 -07006957 addr0_high = tr32(MAC_ADDR_0_HIGH);
6958 addr0_low = tr32(MAC_ADDR_0_LOW);
6959 addr1_high = tr32(MAC_ADDR_1_HIGH);
6960 addr1_low = tr32(MAC_ADDR_1_LOW);
6961
6962 /* Skip MAC addr 1 if ASF is using it. */
6963 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6964 !(addr1_high == 0 && addr1_low == 0))
6965 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07006966 }
Michael Chan986e0ae2007-05-05 12:10:20 -07006967 spin_lock_bh(&tp->lock);
6968 __tg3_set_mac_addr(tp, skip_mac_1);
6969 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006970
Michael Chanb9ec6c12006-07-25 16:37:27 -07006971 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006972}
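/*
 * The MAC_ADDR_1 test above keeps the driver from clobbering an address that
 * ASF management firmware appears to be using: if entry 1 is non-zero and
 * differs from entry 0, __tg3_set_mac_addr() is told to leave it alone
 * (skip_mac_1).
 */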
6973
6974/* tp->lock is held. */
6975static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6976 dma_addr_t mapping, u32 maxlen_flags,
6977 u32 nic_addr)
6978{
6979 tg3_write_mem(tp,
6980 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6981 ((u64) mapping >> 32));
6982 tg3_write_mem(tp,
6983 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6984 ((u64) mapping & 0xffffffff));
6985 tg3_write_mem(tp,
6986 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6987 maxlen_flags);
6988
6989 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6990 tg3_write_mem(tp,
6991 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6992 nic_addr);
6993}
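/*
 * Each ring's BD info block in NIC SRAM holds a 64-bit host DMA address
 * (high word first), a combined max-length/flags word and, on pre-5705 parts
 * only, a NIC-local ring address.  Illustrative use for the send ring, with
 * constants assumed to come from tg3.h (an example sketch, not copied from
 * this file):
 *
 *	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tp->tx_desc_mapping,
 *		       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
 *		       NIC_SRAM_TX_BUFFER_DESC);
 */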
6994
6995static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07006996static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07006997{
6998 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6999 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7000 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7001 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7002 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7003 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7004 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7005 }
7006 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7007 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7008 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7009 u32 val = ec->stats_block_coalesce_usecs;
7010
7011 if (!netif_carrier_ok(tp->dev))
7012 val = 0;
7013
7014 tw32(HOSTCC_STAT_COAL_TICKS, val);
7015 }
7016}
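/*
 * __tg3_set_coalesce() maps ethtool_coalesce fields one-for-one onto the
 * host coalescing registers.  The *_usecs_irq tick registers and the
 * statistics-block tick are only written on the pre-5705 paths, and the
 * stats tick is forced to 0 while the carrier is down -- presumably to stop
 * the chip from DMA-ing an idle statistics block.
 */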
Linus Torvalds1da177e2005-04-16 15:20:36 -07007017
7018/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007019static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020{
7021 u32 val, rdmac_mode;
7022 int i, err, limit;
7023
7024 tg3_disable_ints(tp);
7025
7026 tg3_stop_fw(tp);
7027
7028 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7029
7030 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007031 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007032 }
7033
Matt Carlsondd477002008-05-25 23:45:58 -07007034 if (reset_phy &&
7035 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007036 tg3_phy_reset(tp);
7037
Linus Torvalds1da177e2005-04-16 15:20:36 -07007038 err = tg3_chip_reset(tp);
7039 if (err)
7040 return err;
7041
7042 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7043
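	/* On 5784 AX parts, take the CPMU out of link-aware and link-idle
	 * modes and run the 10Mb, link-aware and host-access MAC clocks
	 * at 6.25 MHz.
	 */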
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007044 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007045 val = tr32(TG3_CPMU_CTRL);
7046 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7047 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007048
7049 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7050 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7051 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7052 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7053
7054 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7055 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7056 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7057 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7058
7059 val = tr32(TG3_CPMU_HST_ACC);
7060 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7061 val |= CPMU_HST_ACC_MACCLK_6_25;
7062 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007063 }
7064
Linus Torvalds1da177e2005-04-16 15:20:36 -07007065 /* This works around an issue with Athlon chipsets on
7066 * B3 tigon3 silicon. This bit has no effect on any
7067 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007068 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007069 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007070 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7071 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7072 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7073 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7074 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007075
7076 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7077 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7078 val = tr32(TG3PCI_PCISTATE);
7079 val |= PCISTATE_RETRY_SAME_DMA;
7080 tw32(TG3PCI_PCISTATE, val);
7081 }
7082
Matt Carlson0d3031d2007-10-10 18:02:43 -07007083 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7084 /* Allow reads and writes to the
7085 * APE register and memory space.
7086 */
7087 val = tr32(TG3PCI_PCISTATE);
7088 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7089 PCISTATE_ALLOW_APE_SHMEM_WR;
7090 tw32(TG3PCI_PCISTATE, val);
7091 }
7092
Linus Torvalds1da177e2005-04-16 15:20:36 -07007093 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7094 /* Enable some hw fixes. */
7095 val = tr32(TG3PCI_MSI_DATA);
7096 val |= (1 << 26) | (1 << 28) | (1 << 29);
7097 tw32(TG3PCI_MSI_DATA, val);
7098 }
7099
7100 /* Descriptor ring init may make accesses to the
 7101 * NIC SRAM area to set up the TX descriptors, so we
7102 * can only do this after the hardware has been
7103 * successfully reset.
7104 */
Michael Chan32d8c572006-07-25 16:38:29 -07007105 err = tg3_init_rings(tp);
7106 if (err)
7107 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007108
Matt Carlson9936bcf2007-10-10 18:03:07 -07007109 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlson57e69832008-05-25 23:48:31 -07007110 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7111 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007112 /* This value is determined during the probe time DMA
7113 * engine test, tg3_test_dma.
7114 */
7115 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7116 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117
7118 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7119 GRC_MODE_4X_NIC_SEND_RINGS |
7120 GRC_MODE_NO_TX_PHDR_CSUM |
7121 GRC_MODE_NO_RX_PHDR_CSUM);
7122 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007123
7124 /* Pseudo-header checksum is done by hardware logic and not
 7125 * the offload processors, so make the chip do the pseudo-
7126 * header checksums on receive. For transmit it is more
7127 * convenient to do the pseudo-header checksum in software
7128 * as Linux does that on transmit for us in all cases.
7129 */
7130 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007131
7132 tw32(GRC_MODE,
7133 tp->grc_mode |
7134 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7135
 7136 /* Set up the timer prescaler register. The clock is always 66 MHz. */
7137 val = tr32(GRC_MISC_CFG);
7138 val &= ~0xff;
7139 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7140 tw32(GRC_MISC_CFG, val);
7141
7142 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007143 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007144 /* Do nothing. */
7145 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7146 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7148 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7149 else
7150 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7151 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7152 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007154 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7155 int fw_len;
7156
7157 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7158 TG3_TSO5_FW_RODATA_LEN +
7159 TG3_TSO5_FW_DATA_LEN +
7160 TG3_TSO5_FW_SBSS_LEN +
7161 TG3_TSO5_FW_BSS_LEN);
7162 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7163 tw32(BUFMGR_MB_POOL_ADDR,
7164 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7165 tw32(BUFMGR_MB_POOL_SIZE,
7166 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007168
Michael Chan0f893dc2005-07-25 12:30:38 -07007169 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7171 tp->bufmgr_config.mbuf_read_dma_low_water);
7172 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7173 tp->bufmgr_config.mbuf_mac_rx_low_water);
7174 tw32(BUFMGR_MB_HIGH_WATER,
7175 tp->bufmgr_config.mbuf_high_water);
7176 } else {
7177 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7178 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7179 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7180 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7181 tw32(BUFMGR_MB_HIGH_WATER,
7182 tp->bufmgr_config.mbuf_high_water_jumbo);
7183 }
7184 tw32(BUFMGR_DMA_LOW_WATER,
7185 tp->bufmgr_config.dma_low_water);
7186 tw32(BUFMGR_DMA_HIGH_WATER,
7187 tp->bufmgr_config.dma_high_water);
7188
7189 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7190 for (i = 0; i < 2000; i++) {
7191 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7192 break;
7193 udelay(10);
7194 }
7195 if (i >= 2000) {
7196 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7197 tp->dev->name);
7198 return -ENODEV;
7199 }
7200
 7201 /* Set up the replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007202 val = tp->rx_pending / 8;
7203 if (val == 0)
7204 val = 1;
7205 else if (val > tp->rx_std_max_post)
7206 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007207 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7208 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7209 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7210
7211 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7212 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7213 }
Michael Chanf92905d2006-06-29 20:14:29 -07007214
7215 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007216
 7217 /* Initialize TG3_BDINFOs at:
7218 * RCVDBDI_STD_BD: standard eth size rx ring
7219 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7220 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7221 *
7222 * like so:
7223 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7224 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7225 * ring attribute flags
7226 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7227 *
7228 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7229 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7230 *
7231 * The size of each ring is fixed in the firmware, but the location is
7232 * configurable.
7233 */
7234 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7235 ((u64) tp->rx_std_mapping >> 32));
7236 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7237 ((u64) tp->rx_std_mapping & 0xffffffff));
7238 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7239 NIC_SRAM_RX_BUFFER_DESC);
7240
7241 /* Don't even try to program the JUMBO/MINI buffer descriptor
7242 * configs on 5705.
7243 */
7244 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7245 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7246 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7247 } else {
7248 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7249 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7250
7251 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7252 BDINFO_FLAGS_DISABLED);
7253
 7254 /* Set up the jumbo ring replenish threshold. */
7255 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7256
Michael Chan0f893dc2005-07-25 12:30:38 -07007257 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007258 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7259 ((u64) tp->rx_jumbo_mapping >> 32));
7260 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7261 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7262 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7263 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7264 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7265 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7266 } else {
7267 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7268 BDINFO_FLAGS_DISABLED);
7269 }
7270
7271 }
7272
7273 /* There is only one send ring on 5705/5750, no need to explicitly
7274 * disable the others.
7275 */
7276 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7277 /* Clear out send RCB ring in SRAM. */
7278 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7279 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7280 BDINFO_FLAGS_DISABLED);
7281 }
7282
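	/* Reset the host send ring indices and program its BDINFO block. */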
7283 tp->tx_prod = 0;
7284 tp->tx_cons = 0;
7285 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7286 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7287
7288 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7289 tp->tx_desc_mapping,
7290 (TG3_TX_RING_SIZE <<
7291 BDINFO_FLAGS_MAXLEN_SHIFT),
7292 NIC_SRAM_TX_BUFFER_DESC);
7293
7294 /* There is only one receive return ring on 5705/5750, no need
7295 * to explicitly disable the others.
7296 */
7297 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7298 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7299 i += TG3_BDINFO_SIZE) {
7300 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7301 BDINFO_FLAGS_DISABLED);
7302 }
7303 }
7304
7305 tp->rx_rcb_ptr = 0;
7306 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7307
7308 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7309 tp->rx_rcb_mapping,
7310 (TG3_RX_RCB_RING_SIZE(tp) <<
7311 BDINFO_FLAGS_MAXLEN_SHIFT),
7312 0);
7313
7314 tp->rx_std_ptr = tp->rx_pending;
7315 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7316 tp->rx_std_ptr);
7317
Michael Chan0f893dc2005-07-25 12:30:38 -07007318 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007319 tp->rx_jumbo_pending : 0;
7320 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7321 tp->rx_jumbo_ptr);
7322
7323 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007324 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007325
7326 /* MTU + ethernet header + FCS + optional VLAN tag */
7327 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7328
7329 /* The slot time is changed by tg3_setup_phy if we
7330 * run at gigabit with half duplex.
7331 */
7332 tw32(MAC_TX_LENGTHS,
7333 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7334 (6 << TX_LENGTHS_IPG_SHIFT) |
7335 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7336
7337 /* Receive rules. */
7338 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7339 tw32(RCVLPC_CONFIG, 0x0181);
7340
 7341 /* Calculate the RDMAC_MODE setting early; we need it to determine
7342 * the RCVLPC_STATE_ENABLE mask.
7343 */
7344 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7345 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7346 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7347 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7348 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007349
Matt Carlson57e69832008-05-25 23:48:31 -07007350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007352 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7353 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7354 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7355
Michael Chan85e94ce2005-04-21 17:05:28 -07007356 /* If statement applies to 5705 and 5750 PCI devices only */
7357 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7358 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7359 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007360 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007361 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007362 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7363 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7364 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7365 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7366 }
7367 }
7368
Michael Chan85e94ce2005-04-21 17:05:28 -07007369 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7370 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7371
Linus Torvalds1da177e2005-04-16 15:20:36 -07007372 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7373 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374
7375 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007376 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7377 val = tr32(RCVLPC_STATS_ENABLE);
7378 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7379 tw32(RCVLPC_STATS_ENABLE, val);
7380 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7381 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007382 val = tr32(RCVLPC_STATS_ENABLE);
7383 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7384 tw32(RCVLPC_STATS_ENABLE, val);
7385 } else {
7386 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7387 }
7388 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7389 tw32(SNDDATAI_STATSENAB, 0xffffff);
7390 tw32(SNDDATAI_STATSCTRL,
7391 (SNDDATAI_SCTRL_ENABLE |
7392 SNDDATAI_SCTRL_FASTUPD));
7393
 7394 /* Set up the host coalescing engine. */
7395 tw32(HOSTCC_MODE, 0);
7396 for (i = 0; i < 2000; i++) {
7397 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7398 break;
7399 udelay(10);
7400 }
7401
Michael Chand244c892005-07-05 14:42:33 -07007402 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007403
7404 /* set status block DMA address */
7405 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7406 ((u64) tp->status_mapping >> 32));
7407 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7408 ((u64) tp->status_mapping & 0xffffffff));
7409
7410 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7411 /* Status/statistics block address. See tg3_timer,
7412 * the tg3_periodic_fetch_stats call there, and
7413 * tg3_get_stats to see how this works for 5705/5750 chips.
7414 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007415 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7416 ((u64) tp->stats_mapping >> 32));
7417 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7418 ((u64) tp->stats_mapping & 0xffffffff));
7419 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7420 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7421 }
7422
7423 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7424
7425 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7426 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7427 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7428 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7429
7430 /* Clear statistics/status block in chip, and status block in ram. */
7431 for (i = NIC_SRAM_STATS_BLK;
7432 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7433 i += sizeof(u32)) {
7434 tg3_write_mem(tp, i, 0);
7435 udelay(40);
7436 }
7437 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7438
Michael Chanc94e3942005-09-27 12:12:42 -07007439 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7440 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7441 /* reset to prevent losing 1st rx packet intermittently */
7442 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7443 udelay(10);
7444 }
7445
Matt Carlson3bda1252008-08-15 14:08:22 -07007446 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7447 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7448 else
7449 tp->mac_mode = 0;
7450 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007451 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007452 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7453 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7454 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7455 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007456 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7457 udelay(40);
7458
Michael Chan314fba32005-04-21 17:07:04 -07007459 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007460 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007461 * register to preserve the GPIO settings for LOMs. The GPIOs,
7462 * whether used as inputs or outputs, are set by boot code after
7463 * reset.
7464 */
Michael Chan9d26e212006-12-07 00:21:14 -08007465 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007466 u32 gpio_mask;
7467
Michael Chan9d26e212006-12-07 00:21:14 -08007468 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7469 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7470 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007471
7472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7473 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7474 GRC_LCLCTRL_GPIO_OUTPUT3;
7475
Michael Chanaf36e6b2006-03-23 01:28:06 -08007476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7477 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7478
Gary Zambranoaaf84462007-05-05 11:51:45 -07007479 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007480 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7481
7482 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007483 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7484 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7485 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007486 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007487 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7488 udelay(100);
7489
Michael Chan09ee9292005-08-09 20:17:00 -07007490 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007491 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007492
7493 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7494 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7495 udelay(40);
7496 }
7497
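	/* Assemble the write DMA engine mode; chip-specific workaround bits
	 * are OR'd in below before the engine is enabled.
	 */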
7498 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7499 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7500 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7501 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7502 WDMAC_MODE_LNGREAD_ENAB);
7503
Michael Chan85e94ce2005-04-21 17:05:28 -07007504 /* If statement applies to 5705 and 5750 PCI devices only */
7505 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7506 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007508 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7509 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7510 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7511 /* nothing */
7512 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7513 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7514 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7515 val |= WDMAC_MODE_RX_ACCEL;
7516 }
7517 }
7518
Michael Chand9ab5ad2006-03-20 22:27:35 -08007519 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007520 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007521 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007522 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007523 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7524 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007525 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007526
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527 tw32_f(WDMAC_MODE, val);
7528 udelay(40);
7529
Matt Carlson9974a352007-10-07 23:27:28 -07007530 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7531 u16 pcix_cmd;
7532
7533 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7534 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007536 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7537 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007539 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7540 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007541 }
Matt Carlson9974a352007-10-07 23:27:28 -07007542 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7543 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544 }
7545
7546 tw32_f(RDMAC_MODE, rdmac_mode);
7547 udelay(40);
7548
7549 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7550 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7551 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007552
7553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7554 tw32(SNDDATAC_MODE,
7555 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7556 else
7557 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7558
Linus Torvalds1da177e2005-04-16 15:20:36 -07007559 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7560 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7561 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7562 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007563 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7564 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007565 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7566 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7567
7568 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7569 err = tg3_load_5701_a0_firmware_fix(tp);
7570 if (err)
7571 return err;
7572 }
7573
Linus Torvalds1da177e2005-04-16 15:20:36 -07007574 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7575 err = tg3_load_tso_firmware(tp);
7576 if (err)
7577 return err;
7578 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007579
7580 tp->tx_mode = TX_MODE_ENABLE;
7581 tw32_f(MAC_TX_MODE, tp->tx_mode);
7582 udelay(100);
7583
7584 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007589 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7590
Linus Torvalds1da177e2005-04-16 15:20:36 -07007591 tw32_f(MAC_RX_MODE, tp->rx_mode);
7592 udelay(10);
7593
Linus Torvalds1da177e2005-04-16 15:20:36 -07007594 tw32(MAC_LED_CTRL, tp->led_ctrl);
7595
7596 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007597 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7599 udelay(10);
7600 }
7601 tw32_f(MAC_RX_MODE, tp->rx_mode);
7602 udelay(10);
7603
7604 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7605 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7606 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7607 /* Set drive transmission level to 1.2V */
7608 /* only if the signal pre-emphasis bit is not set */
7609 val = tr32(MAC_SERDES_CFG);
7610 val &= 0xfffff000;
7611 val |= 0x880;
7612 tw32(MAC_SERDES_CFG, val);
7613 }
7614 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7615 tw32(MAC_SERDES_CFG, 0x616000);
7616 }
7617
7618 /* Prevent chip from dropping frames when flow control
7619 * is enabled.
7620 */
7621 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7622
7623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7624 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7625 /* Use hardware link auto-negotiation */
7626 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7627 }
7628
Michael Chand4d2c552006-03-20 17:47:20 -08007629 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7630 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7631 u32 tmp;
7632
7633 tmp = tr32(SERDES_RX_CTRL);
7634 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7635 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7636 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7637 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7638 }
7639
Matt Carlsondd477002008-05-25 23:45:58 -07007640 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7641 if (tp->link_config.phy_is_low_power) {
7642 tp->link_config.phy_is_low_power = 0;
7643 tp->link_config.speed = tp->link_config.orig_speed;
7644 tp->link_config.duplex = tp->link_config.orig_duplex;
7645 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7646 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007647
Matt Carlsondd477002008-05-25 23:45:58 -07007648 err = tg3_setup_phy(tp, 0);
7649 if (err)
7650 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007651
Matt Carlsondd477002008-05-25 23:45:58 -07007652 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7653 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7654 u32 tmp;
7655
7656 /* Clear CRC stats. */
7657 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7658 tg3_writephy(tp, MII_TG3_TEST1,
7659 tmp | MII_TG3_TEST1_CRC_EN);
7660 tg3_readphy(tp, 0x14, &tmp);
7661 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007662 }
7663 }
7664
7665 __tg3_set_rx_mode(tp->dev);
7666
7667 /* Initialize receive rules. */
7668 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7669 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7670 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7671 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7672
Michael Chan4cf78e42005-07-25 12:29:19 -07007673 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007674 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007675 limit = 8;
7676 else
7677 limit = 16;
7678 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7679 limit -= 4;
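	/* Disable the unused receive rules.  Each case below intentionally
	 * falls through so that every rule from (limit - 1) down to 4 is
	 * cleared; rules 0 and 1 were programmed above.
	 */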
7680 switch (limit) {
7681 case 16:
7682 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7683 case 15:
7684 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7685 case 14:
7686 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7687 case 13:
7688 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7689 case 12:
7690 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7691 case 11:
7692 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7693 case 10:
7694 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7695 case 9:
7696 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7697 case 8:
7698 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7699 case 7:
7700 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7701 case 6:
7702 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7703 case 5:
7704 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7705 case 4:
7706 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7707 case 3:
7708 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7709 case 2:
7710 case 1:
7711
7712 default:
7713 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007714 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007715
Matt Carlson9ce768e2007-10-11 19:49:11 -07007716 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7717 /* Write our heartbeat update interval to APE. */
7718 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7719 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007720
Linus Torvalds1da177e2005-04-16 15:20:36 -07007721 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7722
Linus Torvalds1da177e2005-04-16 15:20:36 -07007723 return 0;
7724}
7725
7726/* Called at device open time to get the chip ready for
7727 * packet processing. Invoked with tp->lock held.
7728 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007729static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007730{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007731 tg3_switch_clocks(tp);
7732
7733 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7734
Matt Carlson2f751b62008-08-04 23:17:34 -07007735 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007736}
7737
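/* Fold a 32-bit hardware counter into a 64-bit (high/low) software
 * statistic.  The unsigned compare detects wraparound of the low word
 * and propagates a carry into the high word.
 */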
7738#define TG3_STAT_ADD32(PSTAT, REG) \
7739do { u32 __val = tr32(REG); \
7740 (PSTAT)->low += __val; \
7741 if ((PSTAT)->low < __val) \
7742 (PSTAT)->high += 1; \
7743} while (0)
7744
7745static void tg3_periodic_fetch_stats(struct tg3 *tp)
7746{
7747 struct tg3_hw_stats *sp = tp->hw_stats;
7748
7749 if (!netif_carrier_ok(tp->dev))
7750 return;
7751
7752 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7753 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7754 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7755 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7756 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7757 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7758 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7759 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7760 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7761 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7762 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7763 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7764 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7765
7766 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7767 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7768 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7769 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7770 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7771 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7772 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7773 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7774 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7775 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7776 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7777 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7778 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7779 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007780
7781 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7782 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7783 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007784}
7785
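/* Driver watchdog timer: works around the race-prone non-tagged IRQ
 * scheme, polls link state and fetches statistics once a second, and
 * sends the periodic ASF keepalive.
 */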
7786static void tg3_timer(unsigned long __opaque)
7787{
7788 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007789
Michael Chanf475f162006-03-27 23:20:14 -08007790 if (tp->irq_sync)
7791 goto restart_timer;
7792
David S. Millerf47c11e2005-06-24 20:18:35 -07007793 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007794
David S. Millerfac9b832005-05-18 22:46:34 -07007795 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 7796 /* All of this garbage is because, when using non-tagged
 7797 * IRQ status, the mailbox/status_block protocol the chip
 7798 * uses with the CPU is race prone.
7799 */
7800 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7801 tw32(GRC_LOCAL_CTRL,
7802 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7803 } else {
7804 tw32(HOSTCC_MODE, tp->coalesce_mode |
7805 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7806 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007807
David S. Millerfac9b832005-05-18 22:46:34 -07007808 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7809 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007810 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007811 schedule_work(&tp->reset_task);
7812 return;
7813 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007814 }
7815
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816 /* This part only runs once per second. */
7817 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007818 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7819 tg3_periodic_fetch_stats(tp);
7820
Linus Torvalds1da177e2005-04-16 15:20:36 -07007821 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7822 u32 mac_stat;
7823 int phy_event;
7824
7825 mac_stat = tr32(MAC_STATUS);
7826
7827 phy_event = 0;
7828 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7829 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7830 phy_event = 1;
7831 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7832 phy_event = 1;
7833
7834 if (phy_event)
7835 tg3_setup_phy(tp, 0);
7836 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7837 u32 mac_stat = tr32(MAC_STATUS);
7838 int need_setup = 0;
7839
7840 if (netif_carrier_ok(tp->dev) &&
7841 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7842 need_setup = 1;
7843 }
 7844 if (!netif_carrier_ok(tp->dev) &&
7845 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7846 MAC_STATUS_SIGNAL_DET))) {
7847 need_setup = 1;
7848 }
7849 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007850 if (!tp->serdes_counter) {
7851 tw32_f(MAC_MODE,
7852 (tp->mac_mode &
7853 ~MAC_MODE_PORT_MODE_MASK));
7854 udelay(40);
7855 tw32_f(MAC_MODE, tp->mac_mode);
7856 udelay(40);
7857 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007858 tg3_setup_phy(tp, 0);
7859 }
Michael Chan747e8f82005-07-25 12:33:22 -07007860 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7861 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007862
7863 tp->timer_counter = tp->timer_multiplier;
7864 }
7865
Michael Chan130b8e42006-09-27 16:00:40 -07007866 /* Heartbeat is only sent once every 2 seconds.
7867 *
7868 * The heartbeat is to tell the ASF firmware that the host
7869 * driver is still alive. In the event that the OS crashes,
7870 * ASF needs to reset the hardware to free up the FIFO space
7871 * that may be filled with rx packets destined for the host.
7872 * If the FIFO is full, ASF will no longer function properly.
7873 *
 7874 * Unintended resets have been reported on real-time kernels
 7875 * where the timer doesn't run on time. Netpoll will have the
 7876 * same problem.
7877 *
7878 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7879 * to check the ring condition when the heartbeat is expiring
7880 * before doing the reset. This will prevent most unintended
7881 * resets.
7882 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007883 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007884 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7885 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007886 tg3_wait_for_event_ack(tp);
7887
Michael Chanbbadf502006-04-06 21:46:34 -07007888 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007889 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007890 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007891 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007892 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007893
7894 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007895 }
7896 tp->asf_counter = tp->asf_multiplier;
7897 }
7898
David S. Millerf47c11e2005-06-24 20:18:35 -07007899 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900
Michael Chanf475f162006-03-27 23:20:14 -08007901restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007902 tp->timer.expires = jiffies + tp->timer_offset;
7903 add_timer(&tp->timer);
7904}
7905
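/* Pick the interrupt handler that matches the current scheme (MSI,
 * one-shot MSI, tagged or non-tagged INTx) and request the IRQ.
 */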
Adrian Bunk81789ef2006-03-20 23:00:14 -08007906static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007907{
David Howells7d12e782006-10-05 14:55:46 +01007908 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007909 unsigned long flags;
7910 struct net_device *dev = tp->dev;
7911
7912 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7913 fn = tg3_msi;
7914 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7915 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007916 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007917 } else {
7918 fn = tg3_interrupt;
7919 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7920 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007921 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007922 }
7923 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7924}
7925
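/* Verify that the device can deliver an interrupt: install a test ISR,
 * force an interrupt with HOSTCC_MODE_NOW, and poll the interrupt
 * mailbox (or the masked-PCI-interrupt bit) for up to ~50 ms.
 */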
Michael Chan79381092005-04-21 17:13:59 -07007926static int tg3_test_interrupt(struct tg3 *tp)
7927{
7928 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07007929 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07007930
Michael Chand4bc3922005-05-29 14:59:20 -07007931 if (!netif_running(dev))
7932 return -ENODEV;
7933
Michael Chan79381092005-04-21 17:13:59 -07007934 tg3_disable_ints(tp);
7935
7936 free_irq(tp->pdev->irq, dev);
7937
7938 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007939 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07007940 if (err)
7941 return err;
7942
Michael Chan38f38432005-09-05 17:53:32 -07007943 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07007944 tg3_enable_ints(tp);
7945
7946 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7947 HOSTCC_MODE_NOW);
7948
7949 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07007950 u32 int_mbox, misc_host_ctrl;
7951
Michael Chan09ee9292005-08-09 20:17:00 -07007952 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7953 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07007954 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7955
7956 if ((int_mbox != 0) ||
7957 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7958 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07007959 break;
Michael Chanb16250e2006-09-27 16:10:14 -07007960 }
7961
Michael Chan79381092005-04-21 17:13:59 -07007962 msleep(10);
7963 }
7964
7965 tg3_disable_ints(tp);
7966
7967 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04007968
Michael Chanfcfa0a32006-03-20 22:28:41 -08007969 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007970
7971 if (err)
7972 return err;
7973
Michael Chanb16250e2006-09-27 16:10:14 -07007974 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07007975 return 0;
7976
7977 return -EIO;
7978}
7979
 7980/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 7981 * INTx mode is successfully restored.
 7982 */
7983static int tg3_test_msi(struct tg3 *tp)
7984{
7985 struct net_device *dev = tp->dev;
7986 int err;
7987 u16 pci_cmd;
7988
7989 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7990 return 0;
7991
7992 /* Turn off SERR reporting in case MSI terminates with Master
7993 * Abort.
7994 */
7995 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7996 pci_write_config_word(tp->pdev, PCI_COMMAND,
7997 pci_cmd & ~PCI_COMMAND_SERR);
7998
7999 err = tg3_test_interrupt(tp);
8000
8001 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8002
8003 if (!err)
8004 return 0;
8005
8006 /* other failures */
8007 if (err != -EIO)
8008 return err;
8009
8010 /* MSI test failed, go back to INTx mode */
8011 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8012 "switching to INTx mode. Please report this failure to "
8013 "the PCI maintainer and include system chipset information.\n",
8014 tp->dev->name);
8015
8016 free_irq(tp->pdev->irq, dev);
8017 pci_disable_msi(tp->pdev);
8018
8019 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8020
Michael Chanfcfa0a32006-03-20 22:28:41 -08008021 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008022 if (err)
8023 return err;
8024
8025 /* Need to reset the chip because the MSI cycle may have terminated
8026 * with Master Abort.
8027 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008028 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008029
Michael Chan944d9802005-05-29 14:57:48 -07008030 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008031 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008032
David S. Millerf47c11e2005-06-24 20:18:35 -07008033 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008034
8035 if (err)
8036 free_irq(tp->pdev->irq, dev);
8037
8038 return err;
8039}
8040
Linus Torvalds1da177e2005-04-16 15:20:36 -07008041static int tg3_open(struct net_device *dev)
8042{
8043 struct tg3 *tp = netdev_priv(dev);
8044 int err;
8045
Michael Chanc49a1562006-12-17 17:07:29 -08008046 netif_carrier_off(tp->dev);
8047
Michael Chanbc1c7562006-03-20 17:48:03 -08008048 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008049 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008050 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008051
8052 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008053
Linus Torvalds1da177e2005-04-16 15:20:36 -07008054 tg3_disable_ints(tp);
8055 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8056
David S. Millerf47c11e2005-06-24 20:18:35 -07008057 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008058
8059 /* The placement of this call is tied
8060 * to the setup and use of Host TX descriptors.
8061 */
8062 err = tg3_alloc_consistent(tp);
8063 if (err)
8064 return err;
8065
Michael Chan7544b092007-05-05 13:08:32 -07008066 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008067 /* All MSI supporting chips should support tagged
8068 * status. Assert that this is the case.
8069 */
8070 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8071 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8072 "Not using MSI.\n", tp->dev->name);
8073 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008074 u32 msi_mode;
8075
8076 msi_mode = tr32(MSGINT_MODE);
8077 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8078 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8079 }
8080 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008081 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008082
8083 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008084 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8085 pci_disable_msi(tp->pdev);
8086 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8087 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008088 tg3_free_consistent(tp);
8089 return err;
8090 }
8091
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008092 napi_enable(&tp->napi);
8093
David S. Millerf47c11e2005-06-24 20:18:35 -07008094 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008095
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008096 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008097 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008098 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008099 tg3_free_rings(tp);
8100 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008101 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8102 tp->timer_offset = HZ;
8103 else
8104 tp->timer_offset = HZ / 10;
8105
8106 BUG_ON(tp->timer_offset > HZ);
8107 tp->timer_counter = tp->timer_multiplier =
8108 (HZ / tp->timer_offset);
8109 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008110 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008111
8112 init_timer(&tp->timer);
8113 tp->timer.expires = jiffies + tp->timer_offset;
8114 tp->timer.data = (unsigned long) tp;
8115 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008116 }
8117
David S. Millerf47c11e2005-06-24 20:18:35 -07008118 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008119
8120 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008121 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008122 free_irq(tp->pdev->irq, dev);
8123 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8124 pci_disable_msi(tp->pdev);
8125 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127 tg3_free_consistent(tp);
8128 return err;
8129 }
8130
Michael Chan79381092005-04-21 17:13:59 -07008131 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8132 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008133
Michael Chan79381092005-04-21 17:13:59 -07008134 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008135 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008136
8137 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8138 pci_disable_msi(tp->pdev);
8139 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8140 }
Michael Chan944d9802005-05-29 14:57:48 -07008141 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008142 tg3_free_rings(tp);
8143 tg3_free_consistent(tp);
8144
David S. Millerf47c11e2005-06-24 20:18:35 -07008145 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008146
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008147 napi_disable(&tp->napi);
8148
Michael Chan79381092005-04-21 17:13:59 -07008149 return err;
8150 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008151
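		/* Chips using one-shot MSI also need the one-shot bit set in
		 * the PCIe transaction config once MSI is known to work.
		 */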
8152 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8153 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008154 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008155
Michael Chanb5d37722006-09-27 16:06:21 -07008156 tw32(PCIE_TRANSACTION_CFG,
8157 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008158 }
8159 }
Michael Chan79381092005-04-21 17:13:59 -07008160 }
8161
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008162 tg3_phy_start(tp);
8163
David S. Millerf47c11e2005-06-24 20:18:35 -07008164 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008165
Michael Chan79381092005-04-21 17:13:59 -07008166 add_timer(&tp->timer);
8167 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008168 tg3_enable_ints(tp);
8169
David S. Millerf47c11e2005-06-24 20:18:35 -07008170 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008171
8172 netif_start_queue(dev);
8173
8174 return 0;
8175}
8176
8177#if 0
8178/*static*/ void tg3_dump_state(struct tg3 *tp)
8179{
8180 u32 val32, val32_2, val32_3, val32_4, val32_5;
8181 u16 val16;
8182 int i;
8183
8184 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8185 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8186 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8187 val16, val32);
8188
8189 /* MAC block */
8190 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8191 tr32(MAC_MODE), tr32(MAC_STATUS));
8192 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8193 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8194 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8195 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8196 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8197 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8198
8199 /* Send data initiator control block */
8200 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8201 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8202 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8203 tr32(SNDDATAI_STATSCTRL));
8204
8205 /* Send data completion control block */
8206 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8207
8208 /* Send BD ring selector block */
8209 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8210 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8211
8212 /* Send BD initiator control block */
8213 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8214 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8215
8216 /* Send BD completion control block */
8217 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8218
8219 /* Receive list placement control block */
8220 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8221 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8222 printk(" RCVLPC_STATSCTRL[%08x]\n",
8223 tr32(RCVLPC_STATSCTRL));
8224
8225 /* Receive data and receive BD initiator control block */
8226 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8227 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8228
8229 /* Receive data completion control block */
8230 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8231 tr32(RCVDCC_MODE));
8232
8233 /* Receive BD initiator control block */
8234 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8235 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8236
8237 /* Receive BD completion control block */
8238 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8239 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8240
8241 /* Receive list selector control block */
8242 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8243 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8244
8245 /* Mbuf cluster free block */
8246 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8247 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8248
8249 /* Host coalescing control block */
8250 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8251 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8252 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8253 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8254 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8255 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8256 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8257 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8258 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8259 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8260 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8261 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8262
8263 /* Memory arbiter control block */
8264 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8265 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8266
8267 /* Buffer manager control block */
8268 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8269 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8270 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8271 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8272 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8273 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8274 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8275 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8276
8277 /* Read DMA control block */
8278 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8279 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8280
8281 /* Write DMA control block */
8282 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8283 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8284
8285 /* DMA completion block */
8286 printk("DEBUG: DMAC_MODE[%08x]\n",
8287 tr32(DMAC_MODE));
8288
8289 /* GRC block */
8290 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8291 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8292 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8293 tr32(GRC_LOCAL_CTRL));
8294
8295 /* TG3_BDINFOs */
8296 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8297 tr32(RCVDBDI_JUMBO_BD + 0x0),
8298 tr32(RCVDBDI_JUMBO_BD + 0x4),
8299 tr32(RCVDBDI_JUMBO_BD + 0x8),
8300 tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif

static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}

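/* Hardware counters are kept as 64-bit high/low pairs; fold one into an
 * unsigned long for the net_device_stats interface.  On 32-bit hosts only
 * the low word is reported, matching the width of the stats fields.
 */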
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
	unsigned long ret;

#if (BITS_PER_LONG == 32)
	ret = val->low;
#else
	ret = ((u64)val->high << 32) | ((u64)val->low);
#endif
	return ret;
}

static inline u64 get_estat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

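/* Accumulate a 64-bit hardware counter on top of the snapshot that was saved
 * in estats_prev when the device was last closed.
 */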
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_estat64(&hw_stats->member)

static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}

static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}

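/* Bit-serial CRC-32 (reflected polynomial 0xedb88320, as used by Ethernet),
 * used below for the multicast hash filter and the NVRAM checksum test.
 */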
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp) {
				reg ^= 0xedb88320;
			}
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

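		/* Hash each address with the CRC above: the low seven bits of
		 * the inverted CRC pick one of the 128 filter bits spread
		 * across the four 32-bit MAC_HASH registers.
		 */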
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

#define TG3_REGDUMP_LEN		(32 * 1024)

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}

static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);

static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8 *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}

static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}

static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed. */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000. */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000. */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}

static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
	    device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC) {
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		device_set_wakeup_enable(dp, true);
	} else {
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
		device_set_wakeup_enable(dp, false);
	}
	spin_unlock_bh(&tp->lock);

	return 0;
}

static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}

static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}

static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
	ering->rx_mini_max_pending = 0;
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->tx_pending;
}

static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}

static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;

	if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}

static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}

static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}

static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}

static int tg3_get_sset_count (struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}

static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}

#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

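		/* Each data byte plus its stripped parity bit must have odd
		 * overall parity; any even combination fails the test.
		 */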
9587 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9588 u8 hw8 = hweight8(data[i]);
9589
9590 if ((hw8 & 0x1) && parity[i])
9591 goto out;
9592 else if (!(hw8 & 0x1) && !parity[i])
9593 goto out;
9594 }
9595 err = 0;
9596 goto out;
9597 }
9598
Michael Chan566f86a2005-05-29 14:56:58 -07009599 /* Bootstrap checksum at offset 0x10 */
9600 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009601 if(csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009602 goto out;
9603
9604 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9605 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009606 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009607 goto out;
9608
9609 err = 0;
9610
9611out:
9612 kfree(buf);
9613 return err;
9614}
9615
Michael Chanca430072005-05-29 14:57:23 -07009616#define TG3_SERDES_TIMEOUT_SEC 2
9617#define TG3_COPPER_TIMEOUT_SEC 6
9618
9619static int tg3_test_link(struct tg3 *tp)
9620{
9621 int i, max;
9622
9623 if (!netif_running(tp->dev))
9624 return -ENODEV;
9625
Michael Chan4c987482005-09-05 17:52:38 -07009626 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009627 max = TG3_SERDES_TIMEOUT_SEC;
9628 else
9629 max = TG3_COPPER_TIMEOUT_SEC;
9630
9631 for (i = 0; i < max; i++) {
9632 if (netif_carrier_ok(tp->dev))
9633 return 0;
9634
9635 if (msleep_interruptible(1000))
9636 break;
9637 }
9638
9639 return -EIO;
9640}
9641
Michael Chana71116d2005-05-29 14:58:11 -07009642/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009643static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009644{
Michael Chanb16250e2006-09-27 16:10:14 -07009645 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009646 u32 offset, read_mask, write_mask, val, save_val, read_val;
9647 static struct {
9648 u16 offset;
9649 u16 flags;
9650#define TG3_FL_5705 0x1
9651#define TG3_FL_NOT_5705 0x2
9652#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009653#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009654 u32 read_mask;
9655 u32 write_mask;
9656 } reg_tbl[] = {
9657 /* MAC Control Registers */
9658 { MAC_MODE, TG3_FL_NOT_5705,
9659 0x00000000, 0x00ef6f8c },
9660 { MAC_MODE, TG3_FL_5705,
9661 0x00000000, 0x01ef6b8c },
9662 { MAC_STATUS, TG3_FL_NOT_5705,
9663 0x03800107, 0x00000000 },
9664 { MAC_STATUS, TG3_FL_5705,
9665 0x03800100, 0x00000000 },
9666 { MAC_ADDR_0_HIGH, 0x0000,
9667 0x00000000, 0x0000ffff },
9668 { MAC_ADDR_0_LOW, 0x0000,
9669 0x00000000, 0xffffffff },
9670 { MAC_RX_MTU_SIZE, 0x0000,
9671 0x00000000, 0x0000ffff },
9672 { MAC_TX_MODE, 0x0000,
9673 0x00000000, 0x00000070 },
9674 { MAC_TX_LENGTHS, 0x0000,
9675 0x00000000, 0x00003fff },
9676 { MAC_RX_MODE, TG3_FL_NOT_5705,
9677 0x00000000, 0x000007fc },
9678 { MAC_RX_MODE, TG3_FL_5705,
9679 0x00000000, 0x000007dc },
9680 { MAC_HASH_REG_0, 0x0000,
9681 0x00000000, 0xffffffff },
9682 { MAC_HASH_REG_1, 0x0000,
9683 0x00000000, 0xffffffff },
9684 { MAC_HASH_REG_2, 0x0000,
9685 0x00000000, 0xffffffff },
9686 { MAC_HASH_REG_3, 0x0000,
9687 0x00000000, 0xffffffff },
9688
9689 /* Receive Data and Receive BD Initiator Control Registers. */
9690 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9691 0x00000000, 0xffffffff },
9692 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9693 0x00000000, 0xffffffff },
9694 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9695 0x00000000, 0x00000003 },
9696 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9697 0x00000000, 0xffffffff },
9698 { RCVDBDI_STD_BD+0, 0x0000,
9699 0x00000000, 0xffffffff },
9700 { RCVDBDI_STD_BD+4, 0x0000,
9701 0x00000000, 0xffffffff },
9702 { RCVDBDI_STD_BD+8, 0x0000,
9703 0x00000000, 0xffff0002 },
9704 { RCVDBDI_STD_BD+0xc, 0x0000,
9705 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009706
Michael Chana71116d2005-05-29 14:58:11 -07009707 /* Receive BD Initiator Control Registers. */
9708 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9709 0x00000000, 0xffffffff },
9710 { RCVBDI_STD_THRESH, TG3_FL_5705,
9711 0x00000000, 0x000003ff },
9712 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9713 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009714
Michael Chana71116d2005-05-29 14:58:11 -07009715 /* Host Coalescing Control Registers. */
9716 { HOSTCC_MODE, TG3_FL_NOT_5705,
9717 0x00000000, 0x00000004 },
9718 { HOSTCC_MODE, TG3_FL_5705,
9719 0x00000000, 0x000000f6 },
9720 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9721 0x00000000, 0xffffffff },
9722 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9723 0x00000000, 0x000003ff },
9724 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9725 0x00000000, 0xffffffff },
9726 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9727 0x00000000, 0x000003ff },
9728 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9729 0x00000000, 0xffffffff },
9730 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9731 0x00000000, 0x000000ff },
9732 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9733 0x00000000, 0xffffffff },
9734 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9735 0x00000000, 0x000000ff },
9736 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9737 0x00000000, 0xffffffff },
9738 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9739 0x00000000, 0xffffffff },
9740 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9741 0x00000000, 0xffffffff },
9742 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9743 0x00000000, 0x000000ff },
9744 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9745 0x00000000, 0xffffffff },
9746 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9747 0x00000000, 0x000000ff },
9748 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9749 0x00000000, 0xffffffff },
9750 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9751 0x00000000, 0xffffffff },
9752 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9753 0x00000000, 0xffffffff },
9754 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9755 0x00000000, 0xffffffff },
9756 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9757 0x00000000, 0xffffffff },
9758 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9759 0xffffffff, 0x00000000 },
9760 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9761 0xffffffff, 0x00000000 },
9762
9763 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009764 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009765 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009766 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009767 0x00000000, 0x007fffff },
9768 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9769 0x00000000, 0x0000003f },
9770 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9771 0x00000000, 0x000001ff },
9772 { BUFMGR_MB_HIGH_WATER, 0x0000,
9773 0x00000000, 0x000001ff },
9774 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9775 0xffffffff, 0x00000000 },
9776 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9777 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009778
Michael Chana71116d2005-05-29 14:58:11 -07009779 /* Mailbox Registers */
9780 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9781 0x00000000, 0x000001ff },
9782 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9783 0x00000000, 0x000001ff },
9784 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9785 0x00000000, 0x000007ff },
9786 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9787 0x00000000, 0x000001ff },
9788
9789 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9790 };
9791
Michael Chanb16250e2006-09-27 16:10:14 -07009792 is_5705 = is_5750 = 0;
9793 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009794 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009795 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9796 is_5750 = 1;
9797 }
Michael Chana71116d2005-05-29 14:58:11 -07009798
9799 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9800 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9801 continue;
9802
9803 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9804 continue;
9805
9806 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9807 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9808 continue;
9809
Michael Chanb16250e2006-09-27 16:10:14 -07009810 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9811 continue;
9812
Michael Chana71116d2005-05-29 14:58:11 -07009813 offset = (u32) reg_tbl[i].offset;
9814 read_mask = reg_tbl[i].read_mask;
9815 write_mask = reg_tbl[i].write_mask;
9816
9817 /* Save the original register content */
9818 save_val = tr32(offset);
9819
9820 /* Determine the read-only value. */
9821 read_val = save_val & read_mask;
9822
9823 /* Write zero to the register, then make sure the read-only bits
9824 * are not changed and the read/write bits are all zeros.
9825 */
9826 tw32(offset, 0);
9827
9828 val = tr32(offset);
9829
9830 /* Test the read-only and read/write bits. */
9831 if (((val & read_mask) != read_val) || (val & write_mask))
9832 goto out;
9833
9834 /* Write ones to all the bits defined by RdMask and WrMask, then
9835 * make sure the read-only bits are not changed and the
9836 * read/write bits are all ones.
9837 */
9838 tw32(offset, read_mask | write_mask);
9839
9840 val = tr32(offset);
9841
9842 /* Test the read-only bits. */
9843 if ((val & read_mask) != read_val)
9844 goto out;
9845
9846 /* Test the read/write bits. */
9847 if ((val & write_mask) != write_mask)
9848 goto out;
9849
9850 tw32(offset, save_val);
9851 }
9852
9853 return 0;
9854
9855out:
Michael Chan9f88f292006-12-07 00:22:54 -08009856 if (netif_msg_hw(tp))
9857 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9858 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009859 tw32(offset, save_val);
9860 return -EIO;
9861}
9862
Michael Chan7942e1d2005-05-29 14:58:36 -07009863static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9864{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009865 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009866 int i;
9867 u32 j;
9868
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009869 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009870 for (j = 0; j < len; j += 4) {
9871 u32 val;
9872
9873 tg3_write_mem(tp, offset + j, test_pattern[i]);
9874 tg3_read_mem(tp, offset + j, &val);
9875 if (val != test_pattern[i])
9876 return -EIO;
9877 }
9878 }
9879 return 0;
9880}
9881
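/* Run the pattern test over the chip's internal SRAM regions. The region
 * map differs by ASIC family (570x, 5705, 5755-class, 5906), so the right
 * table is chosen from the chip flags and revision first.
 */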
9882static int tg3_test_memory(struct tg3 *tp)
9883{
9884 static struct mem_entry {
9885 u32 offset;
9886 u32 len;
9887 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009888 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009889 { 0x00002000, 0x1c000},
9890 { 0xffffffff, 0x00000}
9891 }, mem_tbl_5705[] = {
9892 { 0x00000100, 0x0000c},
9893 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009894 { 0x00004000, 0x00800},
9895 { 0x00006000, 0x01000},
9896 { 0x00008000, 0x02000},
9897 { 0x00010000, 0x0e000},
9898 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009899 }, mem_tbl_5755[] = {
9900 { 0x00000200, 0x00008},
9901 { 0x00004000, 0x00800},
9902 { 0x00006000, 0x00800},
9903 { 0x00008000, 0x02000},
9904 { 0x00010000, 0x0c000},
9905 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009906 }, mem_tbl_5906[] = {
9907 { 0x00000200, 0x00008},
9908 { 0x00004000, 0x00400},
9909 { 0x00006000, 0x00400},
9910 { 0x00008000, 0x01000},
9911 { 0x00010000, 0x01000},
9912 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009913 };
9914 struct mem_entry *mem_tbl;
9915 int err = 0;
9916 int i;
9917
Michael Chan79f4d132006-03-20 22:28:57 -08009918 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08009919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009921 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -08009924 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -07009925 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9926 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -08009927 else
9928 mem_tbl = mem_tbl_5705;
9929 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07009930 mem_tbl = mem_tbl_570x;
9931
9932 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9933 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9934 mem_tbl[i].len)) != 0)
9935 break;
9936 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009937
Michael Chan7942e1d2005-05-29 14:58:36 -07009938 return err;
9939}
9940
Michael Chan9f40dea2005-09-05 17:53:06 -07009941#define TG3_MAC_LOOPBACK 0
9942#define TG3_PHY_LOOPBACK 1
9943
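/* Core of the loopback self-test: place either the MAC or the PHY in
 * loopback, transmit one 1514-byte frame built from the device MAC address
 * and an incrementing byte pattern, then poll the status block and verify
 * that the frame returns intact on the standard receive ring.
 */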
9944static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07009945{
Michael Chan9f40dea2005-09-05 17:53:06 -07009946 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07009947 u32 desc_idx;
9948 struct sk_buff *skb, *rx_skb;
9949 u8 *tx_data;
9950 dma_addr_t map;
9951 int num_pkts, tx_len, rx_len, i, err;
9952 struct tg3_rx_buffer_desc *desc;
9953
Michael Chan9f40dea2005-09-05 17:53:06 -07009954 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07009955 /* HW errata - mac loopback fails in some cases on 5780.
9956 * Normal traffic and PHY loopback are not affected by
9957 * errata.
9958 */
9959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9960 return 0;
9961
Michael Chan9f40dea2005-09-05 17:53:06 -07009962 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009963 MAC_MODE_PORT_INT_LPBACK;
9964 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9965 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -07009966 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9967 mac_mode |= MAC_MODE_PORT_MODE_MII;
9968 else
9969 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -07009970 tw32(MAC_MODE, mac_mode);
9971 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -07009972 u32 val;
9973
Michael Chanb16250e2006-09-27 16:10:14 -07009974 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9975 u32 phytest;
9976
9977 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9978 u32 phy;
9979
9980 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9981 phytest | MII_TG3_EPHY_SHADOW_EN);
9982 if (!tg3_readphy(tp, 0x1b, &phy))
9983 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -07009984 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9985 }
Michael Chan5d64ad32006-12-07 00:19:40 -08009986 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9987 } else
9988 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -07009989
Matt Carlson9ef8ca92007-07-11 19:48:29 -07009990 tg3_phy_toggle_automdix(tp, 0);
9991
Michael Chan3f7045c2006-09-27 16:02:29 -07009992 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -07009993 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -08009994
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009995 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -08009996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -07009997 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -08009998 mac_mode |= MAC_MODE_PORT_MODE_MII;
9999 } else
10000 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010001
Michael Chanc94e3942005-09-27 12:12:42 -070010002 /* reset to prevent losing 1st rx packet intermittently */
10003 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10004 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10005 udelay(10);
10006 tw32_f(MAC_RX_MODE, tp->rx_mode);
10007 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10009 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10010 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10011 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10012 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010013 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10014 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10015 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010016 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010017 }
10018 else
10019 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010020
10021 err = -EIO;
10022
Michael Chanc76949a2005-05-29 14:58:59 -070010023 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010024 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010025 if (!skb)
10026 return -ENOMEM;
10027
Michael Chanc76949a2005-05-29 14:58:59 -070010028 tx_data = skb_put(skb, tx_len);
10029 memcpy(tx_data, tp->dev->dev_addr, 6);
10030 memset(tx_data + 6, 0x0, 8);
10031
10032 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10033
10034 for (i = 14; i < tx_len; i++)
10035 tx_data[i] = (u8) (i & 0xff);
10036
10037 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10038
10039 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10040 HOSTCC_MODE_NOW);
10041
10042 udelay(10);
10043
10044 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10045
Michael Chanc76949a2005-05-29 14:58:59 -070010046 num_pkts = 0;
10047
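 /* Post a single TX descriptor and hand it to the hardware by writing
 * the new producer index to the send-host mailbox.
 */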
Michael Chan9f40dea2005-09-05 17:53:06 -070010048 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010049
Michael Chan9f40dea2005-09-05 17:53:06 -070010050 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010051 num_pkts++;
10052
Michael Chan9f40dea2005-09-05 17:53:06 -070010053 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10054 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010055 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010056
10057 udelay(10);
10058
Michael Chan3f7045c2006-09-27 16:02:29 -070010059 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10060 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010061 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10062 HOSTCC_MODE_NOW);
10063
10064 udelay(10);
10065
10066 tx_idx = tp->hw_status->idx[0].tx_consumer;
10067 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010068 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010069 (rx_idx == (rx_start_idx + num_pkts)))
10070 break;
10071 }
10072
10073 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10074 dev_kfree_skb(skb);
10075
Michael Chan9f40dea2005-09-05 17:53:06 -070010076 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010077 goto out;
10078
10079 if (rx_idx != rx_start_idx + num_pkts)
10080 goto out;
10081
10082 desc = &tp->rx_rcb[rx_start_idx];
10083 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10084 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10085 if (opaque_key != RXD_OPAQUE_RING_STD)
10086 goto out;
10087
10088 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10089 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10090 goto out;
10091
10092 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10093 if (rx_len != tx_len)
10094 goto out;
10095
10096 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10097
10098 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10099 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10100
10101 for (i = 14; i < tx_len; i++) {
10102 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10103 goto out;
10104 }
10105 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010106
Michael Chanc76949a2005-05-29 14:58:59 -070010107 /* tg3_free_rings will unmap and free the rx_skb */
10108out:
10109 return err;
10110}
10111
Michael Chan9f40dea2005-09-05 17:53:06 -070010112#define TG3_MAC_LOOPBACK_FAILED 1
10113#define TG3_PHY_LOOPBACK_FAILED 2
10114#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10115 TG3_PHY_LOOPBACK_FAILED)
10116
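/* Reset the hardware and run the MAC loopback test, plus the PHY loopback
 * test on non-SERDES devices that are not managed through phylib. On
 * CPMU-equipped chips (5784/5761/5785) link-aware power management is
 * temporarily switched off under the CPMU mutex while the MAC loopback
 * test runs.
 */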
10117static int tg3_test_loopback(struct tg3 *tp)
10118{
10119 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010120 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010121
10122 if (!netif_running(tp->dev))
10123 return TG3_LOOPBACK_FAILED;
10124
Michael Chanb9ec6c12006-07-25 16:37:27 -070010125 err = tg3_reset_hw(tp, 1);
10126 if (err)
10127 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010128
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010132 int i;
10133 u32 status;
10134
10135 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10136
10137 /* Wait for up to 40 microseconds to acquire lock. */
10138 for (i = 0; i < 4; i++) {
10139 status = tr32(TG3_CPMU_MUTEX_GNT);
10140 if (status == CPMU_MUTEX_GNT_DRIVER)
10141 break;
10142 udelay(10);
10143 }
10144
10145 if (status != CPMU_MUTEX_GNT_DRIVER)
10146 return TG3_LOOPBACK_FAILED;
10147
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010148 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010149 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010150 tw32(TG3_CPMU_CTRL,
10151 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10152 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010153 }
10154
Michael Chan9f40dea2005-09-05 17:53:06 -070010155 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10156 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010157
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010161 tw32(TG3_CPMU_CTRL, cpmuctrl);
10162
10163 /* Release the mutex */
10164 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10165 }
10166
Matt Carlsondd477002008-05-25 23:45:58 -070010167 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10168 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010169 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10170 err |= TG3_PHY_LOOPBACK_FAILED;
10171 }
10172
10173 return err;
10174}
10175
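/* ethtool self-test entry point. The NVRAM and link tests always run;
 * when ETH_TEST_FL_OFFLINE is set the chip is halted so the register,
 * memory, loopback and interrupt tests can run, and the hardware is
 * restarted afterwards if the interface was up.
 */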
Michael Chan4cafd3f2005-05-29 14:56:34 -070010176static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10177 u64 *data)
10178{
Michael Chan566f86a2005-05-29 14:56:58 -070010179 struct tg3 *tp = netdev_priv(dev);
10180
Michael Chanbc1c7562006-03-20 17:48:03 -080010181 if (tp->link_config.phy_is_low_power)
10182 tg3_set_power_state(tp, PCI_D0);
10183
Michael Chan566f86a2005-05-29 14:56:58 -070010184 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10185
10186 if (tg3_test_nvram(tp) != 0) {
10187 etest->flags |= ETH_TEST_FL_FAILED;
10188 data[0] = 1;
10189 }
Michael Chanca430072005-05-29 14:57:23 -070010190 if (tg3_test_link(tp) != 0) {
10191 etest->flags |= ETH_TEST_FL_FAILED;
10192 data[1] = 1;
10193 }
Michael Chana71116d2005-05-29 14:58:11 -070010194 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010195 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010196
Michael Chanbbe832c2005-06-24 20:20:04 -070010197 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010198 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010199 tg3_netif_stop(tp);
10200 irq_sync = 1;
10201 }
10202
10203 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010204
10205 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010206 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010207 tg3_halt_cpu(tp, RX_CPU_BASE);
10208 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10209 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010210 if (!err)
10211 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010212
Michael Chand9ab5ad2006-03-20 22:27:35 -080010213 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10214 tg3_phy_reset(tp);
10215
Michael Chana71116d2005-05-29 14:58:11 -070010216 if (tg3_test_registers(tp) != 0) {
10217 etest->flags |= ETH_TEST_FL_FAILED;
10218 data[2] = 1;
10219 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010220 if (tg3_test_memory(tp) != 0) {
10221 etest->flags |= ETH_TEST_FL_FAILED;
10222 data[3] = 1;
10223 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010224 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010225 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010226
David S. Millerf47c11e2005-06-24 20:18:35 -070010227 tg3_full_unlock(tp);
10228
Michael Chand4bc3922005-05-29 14:59:20 -070010229 if (tg3_test_interrupt(tp) != 0) {
10230 etest->flags |= ETH_TEST_FL_FAILED;
10231 data[5] = 1;
10232 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010233
10234 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010235
Michael Chana71116d2005-05-29 14:58:11 -070010236 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10237 if (netif_running(dev)) {
10238 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010239 err2 = tg3_restart_hw(tp, 1);
10240 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010241 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010242 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010243
10244 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010245
10246 if (irq_sync && !err2)
10247 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010248 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010249 if (tp->link_config.phy_is_low_power)
10250 tg3_set_power_state(tp, PCI_D3hot);
10251
Michael Chan4cafd3f2005-05-29 14:56:34 -070010252}
10253
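/* MII ioctl handler. When the PHY is managed through phylib the request
 * is forwarded to phy_mii_ioctl(); otherwise the PHY register access is
 * done directly under tp->lock, and is refused while the PHY is powered
 * down or when the device has no copper PHY.
 */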
Linus Torvalds1da177e2005-04-16 15:20:36 -070010254static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10255{
10256 struct mii_ioctl_data *data = if_mii(ifr);
10257 struct tg3 *tp = netdev_priv(dev);
10258 int err;
10259
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010260 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10261 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10262 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010263 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010264 }
10265
Linus Torvalds1da177e2005-04-16 15:20:36 -070010266 switch (cmd) {
10267 case SIOCGMIIPHY:
10268 data->phy_id = PHY_ADDR;
10269
10270 /* fallthru */
10271 case SIOCGMIIREG: {
10272 u32 mii_regval;
10273
10274 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10275 break; /* We have no PHY */
10276
Michael Chanbc1c7562006-03-20 17:48:03 -080010277 if (tp->link_config.phy_is_low_power)
10278 return -EAGAIN;
10279
David S. Millerf47c11e2005-06-24 20:18:35 -070010280 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010281 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010282 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010283
10284 data->val_out = mii_regval;
10285
10286 return err;
10287 }
10288
10289 case SIOCSMIIREG:
10290 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10291 break; /* We have no PHY */
10292
10293 if (!capable(CAP_NET_ADMIN))
10294 return -EPERM;
10295
Michael Chanbc1c7562006-03-20 17:48:03 -080010296 if (tp->link_config.phy_is_low_power)
10297 return -EAGAIN;
10298
David S. Millerf47c11e2005-06-24 20:18:35 -070010299 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010300 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010301 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010302
10303 return err;
10304
10305 default:
10306 /* do nothing */
10307 break;
10308 }
10309 return -EOPNOTSUPP;
10310}
10311
10312#if TG3_VLAN_TAG_USED
10313static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10314{
10315 struct tg3 *tp = netdev_priv(dev);
10316
Michael Chan29315e82006-06-29 20:12:30 -070010317 if (netif_running(dev))
10318 tg3_netif_stop(tp);
10319
David S. Millerf47c11e2005-06-24 20:18:35 -070010320 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010321
10322 tp->vlgrp = grp;
10323
10324 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10325 __tg3_set_rx_mode(dev);
10326
Michael Chan29315e82006-06-29 20:12:30 -070010327 if (netif_running(dev))
10328 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010329
10330 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010331}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010332#endif
10333
David S. Miller15f98502005-05-18 22:49:26 -070010334static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10335{
10336 struct tg3 *tp = netdev_priv(dev);
10337
10338 memcpy(ec, &tp->coal, sizeof(*ec));
10339 return 0;
10340}
10341
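/* ethtool set_coalesce handler: reject values outside the chip's limits
 * (the interrupt-tick and statistics-block timers are only adjustable on
 * pre-5705 parts), copy the supported fields into tp->coal and, if the
 * interface is running, program them into the hardware.
 */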
Michael Chand244c892005-07-05 14:42:33 -070010342static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10343{
10344 struct tg3 *tp = netdev_priv(dev);
10345 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10346 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10347
10348 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10349 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10350 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10351 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10352 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10353 }
10354
10355 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10356 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10357 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10358 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10359 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10360 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10361 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10362 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10363 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10364 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10365 return -EINVAL;
10366
10367 /* No rx interrupts will be generated if both are zero */
10368 if ((ec->rx_coalesce_usecs == 0) &&
10369 (ec->rx_max_coalesced_frames == 0))
10370 return -EINVAL;
10371
10372 /* No tx interrupts will be generated if both are zero */
10373 if ((ec->tx_coalesce_usecs == 0) &&
10374 (ec->tx_max_coalesced_frames == 0))
10375 return -EINVAL;
10376
10377 /* Only copy relevant parameters, ignore all others. */
10378 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10379 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10380 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10381 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10382 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10383 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10384 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10385 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10386 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10387
10388 if (netif_running(dev)) {
10389 tg3_full_lock(tp, 0);
10390 __tg3_set_coalesce(tp, &tp->coal);
10391 tg3_full_unlock(tp);
10392 }
10393 return 0;
10394}
10395
Jeff Garzik7282d492006-09-13 14:30:00 -040010396static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010397 .get_settings = tg3_get_settings,
10398 .set_settings = tg3_set_settings,
10399 .get_drvinfo = tg3_get_drvinfo,
10400 .get_regs_len = tg3_get_regs_len,
10401 .get_regs = tg3_get_regs,
10402 .get_wol = tg3_get_wol,
10403 .set_wol = tg3_set_wol,
10404 .get_msglevel = tg3_get_msglevel,
10405 .set_msglevel = tg3_set_msglevel,
10406 .nway_reset = tg3_nway_reset,
10407 .get_link = ethtool_op_get_link,
10408 .get_eeprom_len = tg3_get_eeprom_len,
10409 .get_eeprom = tg3_get_eeprom,
10410 .set_eeprom = tg3_set_eeprom,
10411 .get_ringparam = tg3_get_ringparam,
10412 .set_ringparam = tg3_set_ringparam,
10413 .get_pauseparam = tg3_get_pauseparam,
10414 .set_pauseparam = tg3_set_pauseparam,
10415 .get_rx_csum = tg3_get_rx_csum,
10416 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010417 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010418 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010419 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010420 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010421 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010422 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010423 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010424 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010425 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010426 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010427};
10428
10429static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10430{
Michael Chan1b277772006-03-20 22:27:48 -080010431 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010432
10433 tp->nvram_size = EEPROM_CHIP_SIZE;
10434
Michael Chan18201802006-03-20 22:29:15 -080010435 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010436 return;
10437
Michael Chanb16250e2006-09-27 16:10:14 -070010438 if ((magic != TG3_EEPROM_MAGIC) &&
10439 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10440 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010441 return;
10442
10443 /*
10444 * Size the chip by reading offsets at increasing powers of two.
10445 * When we encounter our validation signature, we know the addressing
10446 * has wrapped around, and thus have our chip size.
10447 */
Michael Chan1b277772006-03-20 22:27:48 -080010448 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010449
10450 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010451 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010452 return;
10453
Michael Chan18201802006-03-20 22:29:15 -080010454 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010455 break;
10456
10457 cursize <<= 1;
10458 }
10459
10460 tp->nvram_size = cursize;
10461}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010462
Linus Torvalds1da177e2005-04-16 15:20:36 -070010463static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10464{
10465 u32 val;
10466
Michael Chan18201802006-03-20 22:29:15 -080010467 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010468 return;
10469
10470 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010471 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010472 tg3_get_eeprom_size(tp);
10473 return;
10474 }
10475
Linus Torvalds1da177e2005-04-16 15:20:36 -070010476 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10477 if (val != 0) {
10478 tp->nvram_size = (val >> 16) * 1024;
10479 return;
10480 }
10481 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010482 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010483}
10484
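/* Decode NVRAM_CFG1 for chips using the original NVRAM interface: flag
 * the part as flash or EEPROM, and on 5750/5780-class devices derive the
 * JEDEC vendor, page size and buffering from the vendor field; older
 * parts default to buffered Atmel flash.
 */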
10485static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10486{
10487 u32 nvcfg1;
10488
10489 nvcfg1 = tr32(NVRAM_CFG1);
10490 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10491 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10492 }
10493 else {
10494 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10495 tw32(NVRAM_CFG1, nvcfg1);
10496 }
10497
Michael Chan4c987482005-09-05 17:52:38 -070010498 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010499 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010500 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10501 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10502 tp->nvram_jedecnum = JEDEC_ATMEL;
10503 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10504 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10505 break;
10506 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10507 tp->nvram_jedecnum = JEDEC_ATMEL;
10508 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10509 break;
10510 case FLASH_VENDOR_ATMEL_EEPROM:
10511 tp->nvram_jedecnum = JEDEC_ATMEL;
10512 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10513 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10514 break;
10515 case FLASH_VENDOR_ST:
10516 tp->nvram_jedecnum = JEDEC_ST;
10517 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10518 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10519 break;
10520 case FLASH_VENDOR_SAIFUN:
10521 tp->nvram_jedecnum = JEDEC_SAIFUN;
10522 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10523 break;
10524 case FLASH_VENDOR_SST_SMALL:
10525 case FLASH_VENDOR_SST_LARGE:
10526 tp->nvram_jedecnum = JEDEC_SST;
10527 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10528 break;
10529 }
10530 }
10531 else {
10532 tp->nvram_jedecnum = JEDEC_ATMEL;
10533 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10534 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10535 }
10536}
10537
Michael Chan361b4ac2005-04-21 17:11:21 -070010538static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10539{
10540 u32 nvcfg1;
10541
10542 nvcfg1 = tr32(NVRAM_CFG1);
10543
Michael Chane6af3012005-04-21 17:12:05 -070010544 /* NVRAM protection for TPM */
10545 if (nvcfg1 & (1 << 27))
10546 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10547
Michael Chan361b4ac2005-04-21 17:11:21 -070010548 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10549 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10550 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10551 tp->nvram_jedecnum = JEDEC_ATMEL;
10552 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10553 break;
10554 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10555 tp->nvram_jedecnum = JEDEC_ATMEL;
10556 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10557 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10558 break;
10559 case FLASH_5752VENDOR_ST_M45PE10:
10560 case FLASH_5752VENDOR_ST_M45PE20:
10561 case FLASH_5752VENDOR_ST_M45PE40:
10562 tp->nvram_jedecnum = JEDEC_ST;
10563 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10564 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10565 break;
10566 }
10567
10568 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10569 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10570 case FLASH_5752PAGE_SIZE_256:
10571 tp->nvram_pagesize = 256;
10572 break;
10573 case FLASH_5752PAGE_SIZE_512:
10574 tp->nvram_pagesize = 512;
10575 break;
10576 case FLASH_5752PAGE_SIZE_1K:
10577 tp->nvram_pagesize = 1024;
10578 break;
10579 case FLASH_5752PAGE_SIZE_2K:
10580 tp->nvram_pagesize = 2048;
10581 break;
10582 case FLASH_5752PAGE_SIZE_4K:
10583 tp->nvram_pagesize = 4096;
10584 break;
10585 case FLASH_5752PAGE_SIZE_264:
10586 tp->nvram_pagesize = 264;
10587 break;
10588 }
10589 }
10590 else {
10591 /* For eeprom, set pagesize to maximum eeprom size */
10592 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10593
10594 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10595 tw32(NVRAM_CFG1, nvcfg1);
10596 }
10597}
10598
Michael Chand3c7b882006-03-23 01:28:25 -080010599static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10600{
Matt Carlson989a9d22007-05-05 11:51:05 -070010601 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010602
10603 nvcfg1 = tr32(NVRAM_CFG1);
10604
10605 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010606 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010607 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010608 protect = 1;
10609 }
Michael Chand3c7b882006-03-23 01:28:25 -080010610
Matt Carlson989a9d22007-05-05 11:51:05 -070010611 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10612 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010613 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10614 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10615 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010616 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010617 tp->nvram_jedecnum = JEDEC_ATMEL;
10618 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10619 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10620 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010621 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10622 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010623 tp->nvram_size = (protect ? 0x3e200 :
10624 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010625 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010626 tp->nvram_size = (protect ? 0x1f200 :
10627 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010628 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010629 tp->nvram_size = (protect ? 0x1f200 :
10630 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010631 break;
10632 case FLASH_5752VENDOR_ST_M45PE10:
10633 case FLASH_5752VENDOR_ST_M45PE20:
10634 case FLASH_5752VENDOR_ST_M45PE40:
10635 tp->nvram_jedecnum = JEDEC_ST;
10636 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10637 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10638 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010639 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010640 tp->nvram_size = (protect ?
10641 TG3_NVRAM_SIZE_64KB :
10642 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010643 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010644 tp->nvram_size = (protect ?
10645 TG3_NVRAM_SIZE_64KB :
10646 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010647 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010648 tp->nvram_size = (protect ?
10649 TG3_NVRAM_SIZE_128KB :
10650 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010651 break;
10652 }
10653}
10654
Michael Chan1b277772006-03-20 22:27:48 -080010655static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10656{
10657 u32 nvcfg1;
10658
10659 nvcfg1 = tr32(NVRAM_CFG1);
10660
10661 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10662 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10663 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10664 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10665 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10666 tp->nvram_jedecnum = JEDEC_ATMEL;
10667 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10668 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10669
10670 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10671 tw32(NVRAM_CFG1, nvcfg1);
10672 break;
10673 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10674 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10675 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10676 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10677 tp->nvram_jedecnum = JEDEC_ATMEL;
10678 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10679 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10680 tp->nvram_pagesize = 264;
10681 break;
10682 case FLASH_5752VENDOR_ST_M45PE10:
10683 case FLASH_5752VENDOR_ST_M45PE20:
10684 case FLASH_5752VENDOR_ST_M45PE40:
10685 tp->nvram_jedecnum = JEDEC_ST;
10686 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10687 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10688 tp->nvram_pagesize = 256;
10689 break;
10690 }
10691}
10692
Matt Carlson6b91fa02007-10-10 18:01:09 -070010693static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10694{
10695 u32 nvcfg1, protect = 0;
10696
10697 nvcfg1 = tr32(NVRAM_CFG1);
10698
10699 /* NVRAM protection for TPM */
10700 if (nvcfg1 & (1 << 27)) {
10701 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10702 protect = 1;
10703 }
10704
10705 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10706 switch (nvcfg1) {
10707 case FLASH_5761VENDOR_ATMEL_ADB021D:
10708 case FLASH_5761VENDOR_ATMEL_ADB041D:
10709 case FLASH_5761VENDOR_ATMEL_ADB081D:
10710 case FLASH_5761VENDOR_ATMEL_ADB161D:
10711 case FLASH_5761VENDOR_ATMEL_MDB021D:
10712 case FLASH_5761VENDOR_ATMEL_MDB041D:
10713 case FLASH_5761VENDOR_ATMEL_MDB081D:
10714 case FLASH_5761VENDOR_ATMEL_MDB161D:
10715 tp->nvram_jedecnum = JEDEC_ATMEL;
10716 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10717 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10718 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10719 tp->nvram_pagesize = 256;
10720 break;
10721 case FLASH_5761VENDOR_ST_A_M45PE20:
10722 case FLASH_5761VENDOR_ST_A_M45PE40:
10723 case FLASH_5761VENDOR_ST_A_M45PE80:
10724 case FLASH_5761VENDOR_ST_A_M45PE16:
10725 case FLASH_5761VENDOR_ST_M_M45PE20:
10726 case FLASH_5761VENDOR_ST_M_M45PE40:
10727 case FLASH_5761VENDOR_ST_M_M45PE80:
10728 case FLASH_5761VENDOR_ST_M_M45PE16:
10729 tp->nvram_jedecnum = JEDEC_ST;
10730 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10731 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10732 tp->nvram_pagesize = 256;
10733 break;
10734 }
10735
10736 if (protect) {
10737 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10738 } else {
10739 switch (nvcfg1) {
10740 case FLASH_5761VENDOR_ATMEL_ADB161D:
10741 case FLASH_5761VENDOR_ATMEL_MDB161D:
10742 case FLASH_5761VENDOR_ST_A_M45PE16:
10743 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010744 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010745 break;
10746 case FLASH_5761VENDOR_ATMEL_ADB081D:
10747 case FLASH_5761VENDOR_ATMEL_MDB081D:
10748 case FLASH_5761VENDOR_ST_A_M45PE80:
10749 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010750 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010751 break;
10752 case FLASH_5761VENDOR_ATMEL_ADB041D:
10753 case FLASH_5761VENDOR_ATMEL_MDB041D:
10754 case FLASH_5761VENDOR_ST_A_M45PE40:
10755 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010756 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010757 break;
10758 case FLASH_5761VENDOR_ATMEL_ADB021D:
10759 case FLASH_5761VENDOR_ATMEL_MDB021D:
10760 case FLASH_5761VENDOR_ST_A_M45PE20:
10761 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010762 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010763 break;
10764 }
10765 }
10766}
10767
Michael Chanb5d37722006-09-27 16:06:21 -070010768static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10769{
10770 tp->nvram_jedecnum = JEDEC_ATMEL;
10771 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10772 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10773}
10774
Linus Torvalds1da177e2005-04-16 15:20:36 -070010775/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10776static void __devinit tg3_nvram_init(struct tg3 *tp)
10777{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010778 tw32_f(GRC_EEPROM_ADDR,
10779 (EEPROM_ADDR_FSM_RESET |
10780 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10781 EEPROM_ADDR_CLKPERD_SHIFT)));
10782
Michael Chan9d57f012006-12-07 00:23:25 -080010783 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010784
10785 /* Enable seeprom accesses. */
10786 tw32_f(GRC_LOCAL_CTRL,
10787 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10788 udelay(100);
10789
10790 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10791 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10792 tp->tg3_flags |= TG3_FLAG_NVRAM;
10793
Michael Chanec41c7d2006-01-17 02:40:55 -080010794 if (tg3_nvram_lock(tp)) {
10796 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10796 "tg3_nvram_init failed.\n", tp->dev->name);
10797 return;
10798 }
Michael Chane6af3012005-04-21 17:12:05 -070010799 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010800
Matt Carlson989a9d22007-05-05 11:51:05 -070010801 tp->nvram_size = 0;
10802
Michael Chan361b4ac2005-04-21 17:11:21 -070010803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10804 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010805 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10806 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010807 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010810 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010811 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10812 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010813 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10814 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010815 else
10816 tg3_get_nvram_info(tp);
10817
Matt Carlson989a9d22007-05-05 11:51:05 -070010818 if (tp->nvram_size == 0)
10819 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010820
Michael Chane6af3012005-04-21 17:12:05 -070010821 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010822 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010823
10824 } else {
10825 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10826
10827 tg3_get_eeprom_size(tp);
10828 }
10829}
10830
10831static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10832 u32 offset, u32 *val)
10833{
10834 u32 tmp;
10835 int i;
10836
10837 if (offset > EEPROM_ADDR_ADDR_MASK ||
10838 (offset % 4) != 0)
10839 return -EINVAL;
10840
10841 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10842 EEPROM_ADDR_DEVID_MASK |
10843 EEPROM_ADDR_READ);
10844 tw32(GRC_EEPROM_ADDR,
10845 tmp |
10846 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10847 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10848 EEPROM_ADDR_ADDR_MASK) |
10849 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10850
Michael Chan9d57f012006-12-07 00:23:25 -080010851 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010852 tmp = tr32(GRC_EEPROM_ADDR);
10853
10854 if (tmp & EEPROM_ADDR_COMPLETE)
10855 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010856 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010857 }
10858 if (!(tmp & EEPROM_ADDR_COMPLETE))
10859 return -EBUSY;
10860
10861 *val = tr32(GRC_EEPROM_DATA);
10862 return 0;
10863}
10864
10865#define NVRAM_CMD_TIMEOUT 10000
10866
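/* Issue a command to the NVRAM interface and poll for NVRAM_CMD_DONE,
 * giving up with -EBUSY after NVRAM_CMD_TIMEOUT polls of 10 usec each.
 */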
10867static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10868{
10869 int i;
10870
10871 tw32(NVRAM_CMD, nvram_cmd);
10872 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10873 udelay(10);
10874 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10875 udelay(10);
10876 break;
10877 }
10878 }
10879 if (i == NVRAM_CMD_TIMEOUT) {
10880 return -EBUSY;
10881 }
10882 return 0;
10883}
10884
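/* Translate a linear NVRAM offset into the page:offset form used by
 * buffered Atmel AT45DB flash parts, unless the chip is flagged as not
 * needing the translation. tg3_nvram_logical_addr() below is the
 * inverse mapping.
 */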
Michael Chan18201802006-03-20 22:29:15 -080010885static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10886{
10887 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10888 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10889 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010890 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010891 (tp->nvram_jedecnum == JEDEC_ATMEL))
10892
10893 addr = ((addr / tp->nvram_pagesize) <<
10894 ATMEL_AT45DB0X1B_PAGE_POS) +
10895 (addr % tp->nvram_pagesize);
10896
10897 return addr;
10898}
10899
Michael Chanc4e65752006-03-20 22:29:32 -080010900static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10901{
10902 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10903 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10904 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010905 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010906 (tp->nvram_jedecnum == JEDEC_ATMEL))
10907
10908 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10909 tp->nvram_pagesize) +
10910 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10911
10912 return addr;
10913}
10914
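/* Read one 32-bit word from NVRAM. Devices without an NVRAM interface
 * fall back to the EEPROM state machine; otherwise the offset is
 * translated, the NVRAM lock and access are taken, and a read command
 * is issued through tg3_nvram_exec_cmd().
 */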
Linus Torvalds1da177e2005-04-16 15:20:36 -070010915static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10916{
10917 int ret;
10918
Linus Torvalds1da177e2005-04-16 15:20:36 -070010919 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10920 return tg3_nvram_read_using_eeprom(tp, offset, val);
10921
Michael Chan18201802006-03-20 22:29:15 -080010922 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010923
10924 if (offset > NVRAM_ADDR_MSK)
10925 return -EINVAL;
10926
Michael Chanec41c7d2006-01-17 02:40:55 -080010927 ret = tg3_nvram_lock(tp);
10928 if (ret)
10929 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010930
Michael Chane6af3012005-04-21 17:12:05 -070010931 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010932
10933 tw32(NVRAM_ADDR, offset);
10934 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10935 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10936
10937 if (ret == 0)
10938 *val = swab32(tr32(NVRAM_RDDATA));
10939
Michael Chane6af3012005-04-21 17:12:05 -070010940 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010941
Michael Chan381291b2005-12-13 21:08:21 -080010942 tg3_nvram_unlock(tp);
10943
Linus Torvalds1da177e2005-04-16 15:20:36 -070010944 return ret;
10945}
10946
Al Virob9fc7dc2007-12-17 22:59:57 -080010947static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10948{
10949 u32 v;
10950 int res = tg3_nvram_read(tp, offset, &v);
10951 if (!res)
10952 *val = cpu_to_le32(v);
10953 return res;
10954}
10955
Michael Chan18201802006-03-20 22:29:15 -080010956static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10957{
10958 int err;
10959 u32 tmp;
10960
10961 err = tg3_nvram_read(tp, offset, &tmp);
10962 *val = swab32(tmp);
10963 return err;
10964}
10965
Linus Torvalds1da177e2005-04-16 15:20:36 -070010966static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10967 u32 offset, u32 len, u8 *buf)
10968{
10969 int i, j, rc = 0;
10970 u32 val;
10971
10972 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010973 u32 addr;
10974 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010975
10976 addr = offset + i;
10977
10978 memcpy(&data, buf + i, 4);
10979
Al Virob9fc7dc2007-12-17 22:59:57 -080010980 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010981
10982 val = tr32(GRC_EEPROM_ADDR);
10983 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10984
10985 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10986 EEPROM_ADDR_READ);
10987 tw32(GRC_EEPROM_ADDR, val |
10988 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10989 (addr & EEPROM_ADDR_ADDR_MASK) |
10990 EEPROM_ADDR_START |
10991 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010992
Michael Chan9d57f012006-12-07 00:23:25 -080010993 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010994 val = tr32(GRC_EEPROM_ADDR);
10995
10996 if (val & EEPROM_ADDR_COMPLETE)
10997 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010998 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010999 }
11000 if (!(val & EEPROM_ADDR_COMPLETE)) {
11001 rc = -EBUSY;
11002 break;
11003 }
11004 }
11005
11006 return rc;
11007}
11008
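/* Unbuffered flash can only be programmed a page at a time, so each
 * affected page is read into a scratch buffer, patched with the new data,
 * erased and then rewritten word by word using NVRAM_CMD_FIRST/LAST
 * framing.
 */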
11009/* offset and length are dword aligned */
11010static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11011 u8 *buf)
11012{
11013 int ret = 0;
11014 u32 pagesize = tp->nvram_pagesize;
11015 u32 pagemask = pagesize - 1;
11016 u32 nvram_cmd;
11017 u8 *tmp;
11018
11019 tmp = kmalloc(pagesize, GFP_KERNEL);
11020 if (tmp == NULL)
11021 return -ENOMEM;
11022
11023 while (len) {
11024 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011025 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011026
11027 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011028
Linus Torvalds1da177e2005-04-16 15:20:36 -070011029 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011030 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011031 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011032 break;
11033 }
11034 if (ret)
11035 break;
11036
11037 page_off = offset & pagemask;
11038 size = pagesize;
11039 if (len < size)
11040 size = len;
11041
11042 len -= size;
11043
11044 memcpy(tmp + page_off, buf, size);
11045
11046 offset = offset + (pagesize - page_off);
11047
Michael Chane6af3012005-04-21 17:12:05 -070011048 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011049
11050 /*
11051 * Before we can erase the flash page, we need
11052 * to issue a special "write enable" command.
11053 */
11054 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11055
11056 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11057 break;
11058
11059 /* Erase the target page */
11060 tw32(NVRAM_ADDR, phy_addr);
11061
11062 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11063 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11064
11065 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11066 break;
11067
11068 /* Issue another write enable to start the write. */
11069 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11070
11071 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11072 break;
11073
11074 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011075 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011076
Al Virob9fc7dc2007-12-17 22:59:57 -080011077 data = *((__be32 *) (tmp + j));
11078 /* swab32(le32_to_cpu(data)), actually */
11079 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011080
11081 tw32(NVRAM_ADDR, phy_addr + j);
11082
11083 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11084 NVRAM_CMD_WR;
11085
11086 if (j == 0)
11087 nvram_cmd |= NVRAM_CMD_FIRST;
11088 else if (j == (pagesize - 4))
11089 nvram_cmd |= NVRAM_CMD_LAST;
11090
11091 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11092 break;
11093 }
11094 if (ret)
11095 break;
11096 }
11097
11098 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11099 tg3_nvram_exec_cmd(tp, nvram_cmd);
11100
11101 kfree(tmp);
11102
11103 return ret;
11104}
11105
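/* Buffered flash and EEPROM parts accept word-at-a-time writes, so the
 * data is streamed out directly, with NVRAM_CMD_FIRST/LAST marking the
 * first and last word of each flash page and of the whole transfer.
 */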
11106/* offset and length are dword aligned */
11107static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11108 u8 *buf)
11109{
11110 int i, ret = 0;
11111
11112 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011113 u32 page_off, phy_addr, nvram_cmd;
11114 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011115
11116 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011117 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011118
11119 page_off = offset % tp->nvram_pagesize;
11120
Michael Chan18201802006-03-20 22:29:15 -080011121 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011122
11123 tw32(NVRAM_ADDR, phy_addr);
11124
11125 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11126
11127 if ((page_off == 0) || (i == 0))
11128 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011129 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011130 nvram_cmd |= NVRAM_CMD_LAST;
11131
11132 if (i == (len - 4))
11133 nvram_cmd |= NVRAM_CMD_LAST;
11134
Michael Chan4c987482005-09-05 17:52:38 -070011135 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011136 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011137 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011138 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011139 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011140 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011141 (tp->nvram_jedecnum == JEDEC_ST) &&
11142 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011143
11144 if ((ret = tg3_nvram_exec_cmd(tp,
11145 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11146 NVRAM_CMD_DONE)))
11147
11148 break;
11149 }
11150 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11151 /* We always do complete word writes to eeprom. */
11152 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11153 }
11154
11155 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11156 break;
11157 }
11158 return ret;
11159}
11160
11161/* offset and length are dword aligned */
11162static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11163{
11164 int ret;
11165
Linus Torvalds1da177e2005-04-16 15:20:36 -070011166 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011167 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11168 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011169 udelay(40);
11170 }
11171
11172 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11173 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11174 }
11175 else {
11176 u32 grc_mode;
11177
Michael Chanec41c7d2006-01-17 02:40:55 -080011178 ret = tg3_nvram_lock(tp);
11179 if (ret)
11180 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011181
Michael Chane6af3012005-04-21 17:12:05 -070011182 tg3_enable_nvram_access(tp);
11183 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11184 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011185 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011186
11187 grc_mode = tr32(GRC_MODE);
11188 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11189
11190 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11191 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11192
11193 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11194 buf);
11195 }
11196 else {
11197 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11198 buf);
11199 }
11200
11201 grc_mode = tr32(GRC_MODE);
11202 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11203
Michael Chane6af3012005-04-21 17:12:05 -070011204 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011205 tg3_nvram_unlock(tp);
11206 }
11207
11208 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011209 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011210 udelay(40);
11211 }
11212
11213 return ret;
11214}
11215
11216struct subsys_tbl_ent {
11217 u16 subsys_vendor, subsys_devid;
11218 u32 phy_id;
11219};
11220
11221static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11222 /* Broadcom boards. */
11223 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11224 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11225 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11226 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11227 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11228 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11229 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11230 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11231 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11232 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11233 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11234
11235 /* 3com boards. */
11236 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11237 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11238 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11239 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11240 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11241
11242 /* DELL boards. */
11243 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11244 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11245 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11246 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11247
11248 /* Compaq boards. */
11249 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11250 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11251 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11252 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11253 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11254
11255 /* IBM boards. */
11256 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11257};
11258
11259static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11260{
11261 int i;
11262
11263 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11264 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11265 tp->pdev->subsystem_vendor) &&
11266 (subsys_id_to_phy_id[i].subsys_devid ==
11267 tp->pdev->subsystem_device))
11268 return &subsys_id_to_phy_id[i];
11269 }
11270 return NULL;
11271}
11272
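/* Read back the configuration left in NIC SRAM by the chip firmware:
 * after forcing the device into D0 and enabling the memory arbiter, the
 * SRAM signature is checked and, when valid, the PHY id, copper/SERDES
 * selection, LED mode and WOL/onboard settings are taken from the
 * shared-memory configuration words.
 */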
Michael Chan7d0c41e2005-04-21 17:06:20 -070011273static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011274{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011275 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011276 u16 pmcsr;
11277
11278 /* On some early chips the SRAM cannot be accessed in D3hot state,
11279 * so we need to make sure we're in D0.
11280 */
11281 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11282 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11283 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11284 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011285
11286 /* Make sure register accesses (indirect or otherwise)
11287 * will function correctly.
11288 */
11289 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11290 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011291
David S. Millerf49639e2006-06-09 11:58:36 -070011292 /* The memory arbiter has to be enabled in order for SRAM accesses
11293 * to succeed. Normally on powerup the tg3 chip firmware will make
11294 * sure it is enabled, but other entities such as system netboot
11295 * code might disable it.
11296 */
11297 val = tr32(MEMARB_MODE);
11298 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11299
Linus Torvalds1da177e2005-04-16 15:20:36 -070011300 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011301 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11302
Gary Zambranoa85feb82007-05-05 11:52:19 -070011303 /* Assume an onboard, WOL-capable device by default. */
11304 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011305
Michael Chanb5d37722006-09-27 16:06:21 -070011306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011307 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011308 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011309 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11310 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011311 val = tr32(VCPU_CFGSHDW);
11312 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011313 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011314 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011315 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11316 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011317 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Michael Chanb5d37722006-09-27 16:06:21 -070011318 return;
11319 }
11320
Linus Torvalds1da177e2005-04-16 15:20:36 -070011321 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11322 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11323 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011324 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011325 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011326
11327 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11328 tp->nic_sram_data_cfg = nic_cfg;
11329
11330 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11331 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11332 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11333 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11334 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11335 (ver > 0) && (ver < 0x100))
11336 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11337
Matt Carlsona9daf362008-05-25 23:49:44 -070011338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11339 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11340
Linus Torvalds1da177e2005-04-16 15:20:36 -070011341 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11342 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11343 eeprom_phy_serdes = 1;
11344
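		/* The SRAM stores the PHY ID split across two halves, in the
		 * same layout as the MII PHYSID1/PHYSID2 registers; the shifts
		 * below rebuild the driver's internal PHY_ID value from them.
		 */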
11345 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11346 if (nic_phy_id != 0) {
11347 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11348 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11349
11350 eeprom_phy_id = (id1 >> 16) << 10;
11351 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11352 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11353 } else
11354 eeprom_phy_id = 0;
11355
Michael Chan7d0c41e2005-04-21 17:06:20 -070011356 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011357 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011358 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011359 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11360 else
11361 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11362 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011363
John W. Linvillecbf46852005-04-21 17:01:29 -070011364 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011365 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11366 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011367 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011368 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11369
11370 switch (led_cfg) {
11371 default:
11372 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11373 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11374 break;
11375
11376 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11377 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11378 break;
11379
11380 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11381 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011382
11383 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11384 * returned by some older 5700/5701 bootcode.
11385 */
11386 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11387 ASIC_REV_5700 ||
11388 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11389 ASIC_REV_5701)
11390 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11391
Linus Torvalds1da177e2005-04-16 15:20:36 -070011392 break;
11393
11394 case SHASTA_EXT_LED_SHARED:
11395 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11396 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11397 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11398 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11399 LED_CTRL_MODE_PHY_2);
11400 break;
11401
11402 case SHASTA_EXT_LED_MAC:
11403 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11404 break;
11405
11406 case SHASTA_EXT_LED_COMBO:
11407 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11408 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11409 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11410 LED_CTRL_MODE_PHY_2);
11411 break;
11412
Stephen Hemminger855e1112008-04-16 16:37:28 -070011413 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011414
11415 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11417 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11418 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11419
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011420 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11421 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011422
Michael Chan9d26e212006-12-07 00:21:14 -080011423 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011424 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011425 if ((tp->pdev->subsystem_vendor ==
11426 PCI_VENDOR_ID_ARIMA) &&
11427 (tp->pdev->subsystem_device == 0x205a ||
11428 tp->pdev->subsystem_device == 0x2063))
11429 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11430 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011431 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011432 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11433 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011434
11435 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11436 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011437 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011438 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11439 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011440
11441 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11442 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011443 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011444
Gary Zambranoa85feb82007-05-05 11:52:19 -070011445 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11446 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11447 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011448
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011449 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11450 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11451 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011452 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11453
Linus Torvalds1da177e2005-04-16 15:20:36 -070011454 if (cfg2 & (1 << 17))
11455 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11456
11457 /* SerDes signal pre-emphasis in register 0x590 is set by the
11458 * bootcode if bit 18 is set. */
11459 if (cfg2 & (1 << 18))
11460 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011461
11462 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11463 u32 cfg3;
11464
11465 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11466 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11467 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11468 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011469
11470 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11471 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11472 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11473 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11474 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11475 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011476 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011477}
11478
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011479static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11480{
11481 int i;
11482 u32 val;
11483
11484 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11485 tw32(OTP_CTRL, cmd);
11486
11487 /* Wait for up to 1 ms for command to execute. */
11488 for (i = 0; i < 100; i++) {
11489 val = tr32(OTP_STATUS);
11490 if (val & OTP_STATUS_CMD_DONE)
11491 break;
11492 udelay(10);
11493 }
11494
11495 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11496}
11497
11498/* Read the gphy configuration from the OTP region of the chip. The gphy
11499 * configuration is a 32-bit value that straddles the alignment boundary.
11500 * We do two 32-bit reads and then shift and merge the results.
11501 */
11502static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11503{
11504 u32 bhalf_otp, thalf_otp;
11505
11506 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11507
11508 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11509 return 0;
11510
11511 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11512
11513 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11514 return 0;
11515
11516 thalf_otp = tr32(OTP_READ_DATA);
11517
11518 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11519
11520 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11521 return 0;
11522
11523 bhalf_otp = tr32(OTP_READ_DATA);
11524
11525 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11526}
11527
Michael Chan7d0c41e2005-04-21 17:06:20 -070011528static int __devinit tg3_phy_probe(struct tg3 *tp)
11529{
11530 u32 hw_phy_id_1, hw_phy_id_2;
11531 u32 hw_phy_id, hw_phy_id_masked;
11532 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011533
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011534 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11535 return tg3_phy_init(tp);
11536
Linus Torvalds1da177e2005-04-16 15:20:36 -070011537 /* Reading the PHY ID register can conflict with ASF
11538 * firmware access to the PHY hardware.
11539 */
11540 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011541 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11542 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011543 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11544 } else {
11545 /* Now read the physical PHY_ID from the chip and verify
11546 * that it is sane. If it doesn't look good, we fall back
11547 * to the hard-coded, table-based PHY_ID or, failing
11548 * that, the value found in the eeprom area.
11549 */
11550 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11551 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11552
11553 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11554 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11555 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11556
11557 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11558 }
11559
11560 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11561 tp->phy_id = hw_phy_id;
11562 if (hw_phy_id_masked == PHY_ID_BCM8002)
11563 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011564 else
11565 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011566 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011567 if (tp->phy_id != PHY_ID_INVALID) {
11568 /* Do nothing, phy ID already set up in
11569 * tg3_get_eeprom_hw_cfg().
11570 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011571 } else {
11572 struct subsys_tbl_ent *p;
11573
11574 /* No eeprom signature? Try the hardcoded
11575 * subsys device table.
11576 */
11577 p = lookup_by_subsys(tp);
11578 if (!p)
11579 return -ENODEV;
11580
11581 tp->phy_id = p->phy_id;
11582 if (!tp->phy_id ||
11583 tp->phy_id == PHY_ID_BCM8002)
11584 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11585 }
11586 }
11587
Michael Chan747e8f82005-07-25 12:33:22 -070011588 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011589 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011590 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011591 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011592
11593 tg3_readphy(tp, MII_BMSR, &bmsr);
11594 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11595 (bmsr & BMSR_LSTATUS))
11596 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011597
Linus Torvalds1da177e2005-04-16 15:20:36 -070011598 err = tg3_phy_reset(tp);
11599 if (err)
11600 return err;
11601
11602 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11603 ADVERTISE_100HALF | ADVERTISE_100FULL |
11604 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11605 tg3_ctrl = 0;
11606 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11607 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11608 MII_TG3_CTRL_ADV_1000_FULL);
11609 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11610 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11611 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11612 MII_TG3_CTRL_ENABLE_AS_MASTER);
11613 }
11614
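		/* If the PHY is not already advertising everything computed
		 * above, rewrite MII_ADVERTISE (and MII_TG3_CTRL for gigabit
		 * capable parts) and restart autonegotiation.
		 */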
Michael Chan3600d912006-12-07 00:21:48 -080011615 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11616 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11617 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11618 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011619 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11620
11621 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11622 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11623
11624 tg3_writephy(tp, MII_BMCR,
11625 BMCR_ANENABLE | BMCR_ANRESTART);
11626 }
11627 tg3_phy_set_wirespeed(tp);
11628
11629 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11630 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11631 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11632 }
11633
11634skip_phy_reset:
11635 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11636 err = tg3_init_5401phy_dsp(tp);
11637 if (err)
11638 return err;
11639 }
11640
11641 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11642 err = tg3_init_5401phy_dsp(tp);
11643 }
11644
Michael Chan747e8f82005-07-25 12:33:22 -070011645 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011646 tp->link_config.advertising =
11647 (ADVERTISED_1000baseT_Half |
11648 ADVERTISED_1000baseT_Full |
11649 ADVERTISED_Autoneg |
11650 ADVERTISED_FIBRE);
11651 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11652 tp->link_config.advertising &=
11653 ~(ADVERTISED_1000baseT_Half |
11654 ADVERTISED_1000baseT_Full);
11655
11656 return err;
11657}
11658
11659static void __devinit tg3_read_partno(struct tg3 *tp)
11660{
11661 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011662 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011663 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011664
Michael Chan18201802006-03-20 22:29:15 -080011665 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011666 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011667
Michael Chan18201802006-03-20 22:29:15 -080011668 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011669 for (i = 0; i < 256; i += 4) {
11670 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011671
Michael Chan1b277772006-03-20 22:27:48 -080011672 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11673 goto out_not_found;
11674
11675 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11676 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11677 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11678 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11679 }
11680 } else {
11681 int vpd_cap;
11682
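		/* NVRAM does not carry the expected EEPROM signature, so read
		 * the 256-byte VPD area through the PCI VPD capability instead,
		 * polling the completion flag (bit 15 of PCI_VPD_ADDR) per word.
		 */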
11683 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11684 for (i = 0; i < 256; i += 4) {
11685 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011686 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011687 u16 tmp16;
11688
11689 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11690 i);
11691 while (j++ < 100) {
11692 pci_read_config_word(tp->pdev, vpd_cap +
11693 PCI_VPD_ADDR, &tmp16);
11694 if (tmp16 & 0x8000)
11695 break;
11696 msleep(1);
11697 }
David S. Millerf49639e2006-06-09 11:58:36 -070011698 if (!(tmp16 & 0x8000))
11699 goto out_not_found;
11700
Michael Chan1b277772006-03-20 22:27:48 -080011701 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11702 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011703 v = cpu_to_le32(tmp);
11704 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011705 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011706 }
11707
11708 /* Now parse and find the part number. */
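	/* Standard PCI VPD layout: tag 0x82 is the identifier string and
	 * 0x91 the read/write area (both skipped here); tag 0x90 is the
	 * read-only area holding the 'PN' (part number) keyword.
	 */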
Michael Chanaf2c6a42006-11-07 14:57:51 -080011709 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011710 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011711 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011712
11713 if (val == 0x82 || val == 0x91) {
11714 i = (i + 3 +
11715 (vpd_data[i + 1] +
11716 (vpd_data[i + 2] << 8)));
11717 continue;
11718 }
11719
11720 if (val != 0x90)
11721 goto out_not_found;
11722
11723 block_end = (i + 3 +
11724 (vpd_data[i + 1] +
11725 (vpd_data[i + 2] << 8)));
11726 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011727
11728 if (block_end > 256)
11729 goto out_not_found;
11730
11731 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011732 if (vpd_data[i + 0] == 'P' &&
11733 vpd_data[i + 1] == 'N') {
11734 int partno_len = vpd_data[i + 2];
11735
Michael Chanaf2c6a42006-11-07 14:57:51 -080011736 i += 3;
11737 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011738 goto out_not_found;
11739
11740 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011741 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011742
11743 /* Success. */
11744 return;
11745 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011746 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011747 }
11748
11749 /* Part number not found. */
11750 goto out_not_found;
11751 }
11752
11753out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11755 strcpy(tp->board_part_number, "BCM95906");
11756 else
11757 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011758}
11759
Matt Carlson9c8a6202007-10-21 16:16:08 -070011760static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11761{
11762 u32 val;
11763
11764 if (tg3_nvram_read_swab(tp, offset, &val) ||
11765 (val & 0xfc000000) != 0x0c000000 ||
11766 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11767 val != 0)
11768 return 0;
11769
11770 return 1;
11771}
11772
Michael Chanc4e65752006-03-20 22:29:32 -080011773static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11774{
11775 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011776 u32 ver_offset;
11777 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011778
11779 if (tg3_nvram_read_swab(tp, 0, &val))
11780 return;
11781
11782 if (val != TG3_EEPROM_MAGIC)
11783 return;
11784
11785 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11786 tg3_nvram_read_swab(tp, 0x4, &start))
11787 return;
11788
11789 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011790
11791 if (!tg3_fw_img_is_valid(tp, offset) ||
11792 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011793 return;
11794
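	/* The version pointer is given relative to the firmware load address
	 * (start); rebase it onto the image's NVRAM location.
	 */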
Matt Carlson9c8a6202007-10-21 16:16:08 -070011795 offset = offset + ver_offset - start;
11796 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011797 __le32 v;
11798 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011799 return;
11800
Al Virob9fc7dc2007-12-17 22:59:57 -080011801 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011802 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011803
11804 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011805 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011806 return;
11807
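	/* ASF firmware is managing this device, so also scan the NVRAM
	 * directory for the ASF init image and append its version (after
	 * a ", " separator) to the bootcode version read above.
	 */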
11808 for (offset = TG3_NVM_DIR_START;
11809 offset < TG3_NVM_DIR_END;
11810 offset += TG3_NVM_DIRENT_SIZE) {
11811 if (tg3_nvram_read_swab(tp, offset, &val))
11812 return;
11813
11814 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11815 break;
11816 }
11817
11818 if (offset == TG3_NVM_DIR_END)
11819 return;
11820
11821 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11822 start = 0x08000000;
11823 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11824 return;
11825
11826 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11827 !tg3_fw_img_is_valid(tp, offset) ||
11828 tg3_nvram_read_swab(tp, offset + 8, &val))
11829 return;
11830
11831 offset += val - start;
11832
11833 bcnt = strlen(tp->fw_ver);
11834
11835 tp->fw_ver[bcnt++] = ',';
11836 tp->fw_ver[bcnt++] = ' ';
11837
11838 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011839 __le32 v;
11840 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011841 return;
11842
Al Virob9fc7dc2007-12-17 22:59:57 -080011843 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011844
Al Virob9fc7dc2007-12-17 22:59:57 -080011845 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11846 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011847 break;
11848 }
11849
Al Virob9fc7dc2007-12-17 22:59:57 -080011850 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11851 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011852 }
11853
11854 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011855}
11856
Michael Chan7544b092007-05-05 13:08:32 -070011857static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11858
Linus Torvalds1da177e2005-04-16 15:20:36 -070011859static int __devinit tg3_get_invariants(struct tg3 *tp)
11860{
11861 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011862 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11863 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011864 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11865 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011866 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11867 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011868 { },
11869 };
11870 u32 misc_ctrl_reg;
11871 u32 cacheline_sz_reg;
11872 u32 pci_state_reg, grc_misc_cfg;
11873 u32 val;
11874 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011875 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011876
Linus Torvalds1da177e2005-04-16 15:20:36 -070011877 /* Force memory write invalidate off. If we leave it on,
11878 * then on 5700_BX chips we have to enable a workaround.
11879 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11880 * to match the cacheline size. The Broadcom driver has this
11881 * workaround but turns MWI off all the time, so it never uses
11882 * it. This seems to suggest that the workaround is insufficient.
11883 */
11884 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11885 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11886 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11887
11888 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11889 * has the register indirect write enable bit set before
11890 * we try to access any of the MMIO registers. It is also
11891 * critical that the PCI-X hw workaround situation is decided
11892 * before that.
11893 */
11894 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11895 &misc_ctrl_reg);
11896
11897 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11898 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11900 u32 prod_id_asic_rev;
11901
11902 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11903 &prod_id_asic_rev);
11904 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11905 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011906
Michael Chanff645be2005-04-21 17:09:53 -070011907 /* Wrong chip ID in 5752 A0. This code can be removed later
11908 * as A0 is not in production.
11909 */
11910 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11911 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11912
Michael Chan68929142005-08-09 20:17:14 -070011913 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11914 * we need to disable memory and use config. cycles
11915 * only to access all registers. The 5702/03 chips
11916 * can mistakenly decode the special cycles from the
11917 * ICH chipsets as memory write cycles, causing corruption
11918 * of register and memory space. Only certain ICH bridges
11919 * will drive special cycles with non-zero data during the
11920 * address phase which can fall within the 5703's address
11921 * range. This is not an ICH bug as the PCI spec allows
11922 * non-zero address during special cycles. However, only
11923 * these ICH bridges are known to drive non-zero addresses
11924 * during special cycles.
11925 *
11926 * Since special cycles do not cross PCI bridges, we only
11927 * enable this workaround if the 5703 is on the secondary
11928 * bus of these ICH bridges.
11929 */
11930 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11931 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11932 static struct tg3_dev_id {
11933 u32 vendor;
11934 u32 device;
11935 u32 rev;
11936 } ich_chipsets[] = {
11937 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11938 PCI_ANY_ID },
11939 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11940 PCI_ANY_ID },
11941 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11942 0xa },
11943 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11944 PCI_ANY_ID },
11945 { },
11946 };
11947 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11948 struct pci_dev *bridge = NULL;
11949
11950 while (pci_id->vendor != 0) {
11951 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11952 bridge);
11953 if (!bridge) {
11954 pci_id++;
11955 continue;
11956 }
11957 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070011958 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070011959 continue;
11960 }
11961 if (bridge->subordinate &&
11962 (bridge->subordinate->number ==
11963 tp->pdev->bus->number)) {
11964
11965 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11966 pci_dev_put(bridge);
11967 break;
11968 }
11969 }
11970 }
11971
Matt Carlson41588ba2008-04-19 18:12:33 -070011972 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11973 static struct tg3_dev_id {
11974 u32 vendor;
11975 u32 device;
11976 } bridge_chipsets[] = {
11977 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11978 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11979 { },
11980 };
11981 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11982 struct pci_dev *bridge = NULL;
11983
11984 while (pci_id->vendor != 0) {
11985 bridge = pci_get_device(pci_id->vendor,
11986 pci_id->device,
11987 bridge);
11988 if (!bridge) {
11989 pci_id++;
11990 continue;
11991 }
11992 if (bridge->subordinate &&
11993 (bridge->subordinate->number <=
11994 tp->pdev->bus->number) &&
11995 (bridge->subordinate->subordinate >=
11996 tp->pdev->bus->number)) {
11997 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11998 pci_dev_put(bridge);
11999 break;
12000 }
12001 }
12002 }
12003
Michael Chan4a29cc22006-03-19 13:21:12 -080012004 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12005 * DMA addresses > 40-bit. This bridge may have additional
12006 * 57xx devices behind it in some 4-port NIC designs for example.
12007 * Any tg3 device found behind the bridge will also need the 40-bit
12008 * DMA workaround.
12009 */
Michael Chana4e2b342005-10-26 15:46:52 -070012010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12012 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012013 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012014 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012015 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012016 else {
12017 struct pci_dev *bridge = NULL;
12018
12019 do {
12020 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12021 PCI_DEVICE_ID_SERVERWORKS_EPB,
12022 bridge);
12023 if (bridge && bridge->subordinate &&
12024 (bridge->subordinate->number <=
12025 tp->pdev->bus->number) &&
12026 (bridge->subordinate->subordinate >=
12027 tp->pdev->bus->number)) {
12028 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12029 pci_dev_put(bridge);
12030 break;
12031 }
12032 } while (bridge);
12033 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012034
Linus Torvalds1da177e2005-04-16 15:20:36 -070012035 /* Initialize misc host control in PCI block. */
12036 tp->misc_host_ctrl |= (misc_ctrl_reg &
12037 MISC_HOST_CTRL_CHIPREV);
12038 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12039 tp->misc_host_ctrl);
12040
12041 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12042 &cacheline_sz_reg);
12043
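	/* TG3PCI_CACHELINESZ mirrors the standard PCI cache line size,
	 * latency timer, header type and BIST config bytes, one per byte lane.
	 */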
12044 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12045 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12046 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12047 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12048
Michael Chan7544b092007-05-05 13:08:32 -070012049 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12050 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12051 tp->pdev_peer = tg3_find_peer(tp);
12052
John W. Linville2052da92005-04-21 16:56:08 -070012053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012061 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012062 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12063
John W. Linville1b440c562005-04-21 17:03:18 -070012064 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12065 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12066 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12067
Michael Chan5a6f3072006-03-20 22:28:05 -080012068 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012069 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12070 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12071 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12072 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12073 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12074 tp->pdev_peer == tp->pdev))
12075 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12076
Michael Chanaf36e6b2006-03-23 01:28:06 -080012077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012083 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012084 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012085 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012086 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012087 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12088 ASIC_REV_5750 &&
12089 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012090 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012091 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012092 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012093
Matt Carlsonf51f3562008-05-25 23:45:08 -070012094 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12095 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012096 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12097
Michael Chanc7835a72006-11-15 21:14:42 -080012098 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12099 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012100 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012101
12102 pcie_set_readrq(tp->pdev, 4096);
12103
Michael Chanc7835a72006-11-15 21:14:42 -080012104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12105 u16 lnkctl;
12106
12107 pci_read_config_word(tp->pdev,
12108 pcie_cap + PCI_EXP_LNKCTL,
12109 &lnkctl);
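			/* HW TSO on the 5906 reportedly misbehaves when the
			 * PCIe link has CLKREQ enabled, so drop the capability
			 * here.
			 */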
12110 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12111 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12112 }
12113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012114
Michael Chan399de502005-10-03 14:02:39 -070012115 /* If we have an AMD 762 or VIA K8T800 chipset, write
12116 * reordering to the mailbox registers done by the host
12117 * controller can cause major trouble. We read back from
12118 * every mailbox register write to force the writes to be
12119 * posted to the chip in order.
12120 */
12121 if (pci_dev_present(write_reorder_chipsets) &&
12122 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12123 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12124
Linus Torvalds1da177e2005-04-16 15:20:36 -070012125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12126 tp->pci_lat_timer < 64) {
12127 tp->pci_lat_timer = 64;
12128
12129 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12130 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12131 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12132 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12133
12134 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12135 cacheline_sz_reg);
12136 }
12137
Matt Carlson9974a352007-10-07 23:27:28 -070012138 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12139 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12140 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12141 if (!tp->pcix_cap) {
12142 printk(KERN_ERR PFX "Cannot find PCI-X "
12143 "capability, aborting.\n");
12144 return -EIO;
12145 }
12146 }
12147
Linus Torvalds1da177e2005-04-16 15:20:36 -070012148 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12149 &pci_state_reg);
12150
Matt Carlson9974a352007-10-07 23:27:28 -070012151 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012152 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12153
12154 /* If this is a 5700 BX chipset, and we are in PCI-X
12155 * mode, enable register write workaround.
12156 *
12157 * The workaround is to use indirect register accesses
12158 * for all chip writes not to mailbox registers.
12159 */
12160 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12161 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012162
12163 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12164
12165 /* The chip can have its power management PCI config
12166 * space registers clobbered due to this bug.
12167 * So explicitly force the chip into D0 here.
12168 */
Matt Carlson9974a352007-10-07 23:27:28 -070012169 pci_read_config_dword(tp->pdev,
12170 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012171 &pm_reg);
12172 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12173 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012174 pci_write_config_dword(tp->pdev,
12175 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012176 pm_reg);
12177
12178 /* Also, force SERR#/PERR# in PCI command. */
12179 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12180 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12181 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12182 }
12183 }
12184
Michael Chan087fe252005-08-09 20:17:41 -070012185 /* 5700 BX chips need to have their TX producer index mailboxes
12186 * written twice to work around a bug.
12187 */
12188 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12189 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12190
Linus Torvalds1da177e2005-04-16 15:20:36 -070012191 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12192 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12193 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12194 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12195
12196 /* Chip-specific fixup from Broadcom driver */
12197 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12198 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12199 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12200 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12201 }
12202
Michael Chan1ee582d2005-08-09 20:16:46 -070012203 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012204 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012205 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012206 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012207 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012208 tp->write32_tx_mbox = tg3_write32;
12209 tp->write32_rx_mbox = tg3_write32;
12210
12211 /* Various workaround register access methods */
12212 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12213 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012214 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12215 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12216 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12217 /*
12218 * Back-to-back register writes can cause problems on these
12219 * chips; the workaround is to read back all reg writes
12220 * except those to mailbox regs.
12221 *
12222 * See tg3_write_indirect_reg32().
12223 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012224 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012225 }
12226
Michael Chan1ee582d2005-08-09 20:16:46 -070012227
12228 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12229 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12230 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12231 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12232 tp->write32_rx_mbox = tg3_write_flush_reg32;
12233 }
Michael Chan20094932005-08-09 20:16:32 -070012234
Michael Chan68929142005-08-09 20:17:14 -070012235 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12236 tp->read32 = tg3_read_indirect_reg32;
12237 tp->write32 = tg3_write_indirect_reg32;
12238 tp->read32_mbox = tg3_read_indirect_mbox;
12239 tp->write32_mbox = tg3_write_indirect_mbox;
12240 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12241 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12242
12243 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012244 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012245
12246 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12247 pci_cmd &= ~PCI_COMMAND_MEMORY;
12248 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12249 }
Michael Chanb5d37722006-09-27 16:06:21 -070012250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12251 tp->read32_mbox = tg3_read32_mbox_5906;
12252 tp->write32_mbox = tg3_write32_mbox_5906;
12253 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12254 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12255 }
Michael Chan68929142005-08-09 20:17:14 -070012256
Michael Chanbbadf502006-04-06 21:46:34 -070012257 if (tp->write32 == tg3_write_indirect_reg32 ||
12258 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12259 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012261 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12262
Michael Chan7d0c41e2005-04-21 17:06:20 -070012263 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012264 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012265 * determined before calling tg3_set_power_state() so that
12266 * we know whether or not to switch out of Vaux power.
12267 * When the flag is set, it means that GPIO1 is used for eeprom
12268 * write protect and also implies that it is a LOM where GPIOs
12269 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012270 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012271 tg3_get_eeprom_hw_cfg(tp);
12272
Matt Carlson0d3031d2007-10-10 18:02:43 -070012273 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12274 /* Allow reads and writes to the
12275 * APE register and memory space.
12276 */
12277 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12278 PCISTATE_ALLOW_APE_SHMEM_WR;
12279 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12280 pci_state_reg);
12281 }
12282
Matt Carlson9936bcf2007-10-10 18:03:07 -070012283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012286 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12287
Michael Chan314fba32005-04-21 17:07:04 -070012288 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12289 * GPIO1 driven high will bring 5700's external PHY out of reset.
12290 * It is also used as eeprom write protect on LOMs.
12291 */
12292 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12293 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12294 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12295 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12296 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012297 /* Unused GPIO3 must be driven as output on 5752 because there
12298 * are no pull-up resistors on unused GPIO pins.
12299 */
12300 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12301 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012302
Michael Chanaf36e6b2006-03-23 01:28:06 -080012303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12304 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12305
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012306 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12307 /* Turn off the debug UART. */
12308 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12309 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12310 /* Keep VMain power. */
12311 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12312 GRC_LCLCTRL_GPIO_OUTPUT0;
12313 }
12314
Linus Torvalds1da177e2005-04-16 15:20:36 -070012315 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012316 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012317 if (err) {
12318 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12319 pci_name(tp->pdev));
12320 return err;
12321 }
12322
12323 /* 5700 B0 chips do not support checksumming correctly due
12324 * to hardware bugs.
12325 */
12326 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12327 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12328
Linus Torvalds1da177e2005-04-16 15:20:36 -070012329 /* Derive initial jumbo mode from MTU assigned in
12330 * ether_setup() via the alloc_etherdev() call
12331 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012332 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012333 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012334 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012335
12336 /* Determine WakeOnLan speed to use. */
12337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12338 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12339 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12340 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12341 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12342 } else {
12343 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12344 }
12345
12346 /* A few boards don't want Ethernet@WireSpeed phy feature */
12347 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12348 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12349 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012350 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012351 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012352 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012353 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12354
12355 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12356 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12357 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12358 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12359 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12360
Michael Chanc424cb22006-04-29 18:56:34 -070012361 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012366 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12367 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12368 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012369 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12370 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012371 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12372 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012373 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12374 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012375
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12377 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12378 tp->phy_otp = tg3_read_otp_phycfg(tp);
12379 if (tp->phy_otp == 0)
12380 tp->phy_otp = TG3_OTP_DEFAULT;
12381 }
12382
Matt Carlsonf51f3562008-05-25 23:45:08 -070012383 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012384 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12385 else
12386 tp->mi_mode = MAC_MI_MODE_BASE;
12387
Linus Torvalds1da177e2005-04-16 15:20:36 -070012388 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012389 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12390 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12391 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12392
Matt Carlson57e69832008-05-25 23:48:31 -070012393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12394 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12395
Matt Carlson158d7ab2008-05-29 01:37:54 -070012396 err = tg3_mdio_init(tp);
12397 if (err)
12398 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012399
12400 /* Initialize data/descriptor byte/word swapping. */
12401 val = tr32(GRC_MODE);
12402 val &= GRC_MODE_HOST_STACKUP;
12403 tw32(GRC_MODE, val | tp->grc_mode);
12404
12405 tg3_switch_clocks(tp);
12406
12407 /* Clear this out for sanity. */
12408 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12409
12410 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12411 &pci_state_reg);
12412 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12413 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12414 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12415
12416 if (chiprevid == CHIPREV_ID_5701_A0 ||
12417 chiprevid == CHIPREV_ID_5701_B0 ||
12418 chiprevid == CHIPREV_ID_5701_B2 ||
12419 chiprevid == CHIPREV_ID_5701_B5) {
12420 void __iomem *sram_base;
12421
12422 /* Write some dummy words into the SRAM status block
12423 * area and see if they read back correctly. If the return
12424 * value is bad, force enable the PCIX workaround.
12425 */
12426 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12427
12428 writel(0x00000000, sram_base);
12429 writel(0x00000000, sram_base + 4);
12430 writel(0xffffffff, sram_base + 4);
12431 if (readl(sram_base) != 0x00000000)
12432 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12433 }
12434 }
12435
12436 udelay(50);
12437 tg3_nvram_init(tp);
12438
12439 grc_misc_cfg = tr32(GRC_MISC_CFG);
12440 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12441
Linus Torvalds1da177e2005-04-16 15:20:36 -070012442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12443 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12444 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12445 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12446
David S. Millerfac9b832005-05-18 22:46:34 -070012447 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12448 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12449 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12450 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12451 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12452 HOSTCC_MODE_CLRTICK_TXBD);
12453
12454 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12455 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12456 tp->misc_host_ctrl);
12457 }
12458
Matt Carlson3bda1252008-08-15 14:08:22 -070012459 /* Preserve the APE MAC_MODE bits */
12460 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12461 tp->mac_mode = tr32(MAC_MODE) |
12462 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12463 else
12464 tp->mac_mode = TG3_DEF_MAC_MODE;
12465
Linus Torvalds1da177e2005-04-16 15:20:36 -070012466 /* these are limited to 10/100 only */
12467 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12468 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12469 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12470 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12471 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12472 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12473 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12474 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12475 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012476 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12477 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012479 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12480
12481 err = tg3_phy_probe(tp);
12482 if (err) {
12483 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12484 pci_name(tp->pdev), err);
12485 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012486 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012487 }
12488
12489 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012490 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012491
12492 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12493 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12494 } else {
12495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12496 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12497 else
12498 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12499 }
12500
12501 /* 5700 {AX,BX} chips have a broken status block link
12502 * change bit implementation, so we must use the
12503 * status register in those cases.
12504 */
12505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12506 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12507 else
12508 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12509
12510 /* The led_ctrl is set during tg3_phy_probe; here we might
12511 * have to force the link status polling mechanism based
12512 * upon subsystem IDs.
12513 */
12514 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012516 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12517 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12518 TG3_FLAG_USE_LINKCHG_REG);
12519 }
12520
12521 /* For all SERDES we poll the MAC status register. */
12522 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12523 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12524 else
12525 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12526
Michael Chan5a6f3072006-03-20 22:28:05 -080012527 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070012528 * straddle the 4GB address boundary in some cases.
12529 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080012530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012535 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080012536 tp->dev->hard_start_xmit = tg3_start_xmit;
12537 else
12538 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012539
12540 tp->rx_offset = 2;
12541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12542 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12543 tp->rx_offset = 0;
12544
Michael Chanf92905d2006-06-29 20:14:29 -070012545 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12546
12547 /* Increment the rx prod index on the rx std ring by at most
12548 * 8 for these chips to work around hw errata.
12549 */
12550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12553 tp->rx_std_max_post = 8;
12554
Matt Carlson8ed5d972007-05-07 00:25:49 -070012555 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12556 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12557 PCIE_PWR_MGMT_L1_THRESH_MSK;
12558
Linus Torvalds1da177e2005-04-16 15:20:36 -070012559 return err;
12560}
12561
David S. Miller49b6e95f2007-03-29 01:38:42 -070012562#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012563static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12564{
12565 struct net_device *dev = tp->dev;
12566 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012567 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012568 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012569 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012570
David S. Miller49b6e95f2007-03-29 01:38:42 -070012571 addr = of_get_property(dp, "local-mac-address", &len);
12572 if (addr && len == 6) {
12573 memcpy(dev->dev_addr, addr, 6);
12574 memcpy(dev->perm_addr, dev->dev_addr, 6);
12575 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012576 }
12577 return -ENODEV;
12578}
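/*
 * For reference, OpenFirmware on SPARC exposes the factory-programmed
 * MAC as a 6-byte "local-mac-address" property on the PCI device node,
 * e.g. (invented value):
 *
 *	local-mac-address	00 03 ba 12 34 56
 *
 * which the helper above copies verbatim into dev->dev_addr.
 */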
12579
12580static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12581{
12582 struct net_device *dev = tp->dev;
12583
12584 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012585 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012586 return 0;
12587}
12588#endif
12589
12590static int __devinit tg3_get_device_address(struct tg3 *tp)
12591{
12592 struct net_device *dev = tp->dev;
12593 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012594 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012595
David S. Miller49b6e95f2007-03-29 01:38:42 -070012596#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012597 if (!tg3_get_macaddr_sparc(tp))
12598 return 0;
12599#endif
12600
12601 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012602 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012603 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012604 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12605 mac_offset = 0xcc;
12606 if (tg3_nvram_lock(tp))
12607 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12608 else
12609 tg3_nvram_unlock(tp);
12610 }
Michael Chanb5d37722006-09-27 16:06:21 -070012611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12612 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012613
12614 /* First try to get it from MAC address mailbox. */
12615 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12616 if ((hi >> 16) == 0x484b) {
12617 dev->dev_addr[0] = (hi >> 8) & 0xff;
12618 dev->dev_addr[1] = (hi >> 0) & 0xff;
12619
12620 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12621 dev->dev_addr[2] = (lo >> 24) & 0xff;
12622 dev->dev_addr[3] = (lo >> 16) & 0xff;
12623 dev->dev_addr[4] = (lo >> 8) & 0xff;
12624 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012625
Michael Chan008652b2006-03-27 23:14:53 -080012626 /* Some old bootcode may report a 0 MAC address in SRAM */
12627 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12628 }
12629 if (!addr_ok) {
12630 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012631 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012632 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12633 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12634 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12635 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12636 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12637 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12638 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12639 }
12640 /* Finally just fetch it out of the MAC control regs. */
12641 else {
12642 hi = tr32(MAC_ADDR_0_HIGH);
12643 lo = tr32(MAC_ADDR_0_LOW);
12644
12645 dev->dev_addr[5] = lo & 0xff;
12646 dev->dev_addr[4] = (lo >> 8) & 0xff;
12647 dev->dev_addr[3] = (lo >> 16) & 0xff;
12648 dev->dev_addr[2] = (lo >> 24) & 0xff;
12649 dev->dev_addr[1] = hi & 0xff;
12650 dev->dev_addr[0] = (hi >> 8) & 0xff;
12651 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012652 }
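	/*
	 * Worked example of the layouts above (values invented): with the
	 * SRAM mailbox holding hi = 0x484b0010 and lo = 0x18a2b3c4, the
	 * address becomes 00:10:18:a2:b3:c4 (hi carries bytes 0-1, lo
	 * carries bytes 2-5, most significant byte first).  The NVRAM
	 * words use the reverse byte order within each word, so
	 * hi = 0x10000000 and lo = 0xc4b3a218 decode to the same address.
	 */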
12653
12654 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012655#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012656 if (!tg3_get_default_macaddr_sparc(tp))
12657 return 0;
12658#endif
12659 return -EINVAL;
12660 }
John W. Linville2ff43692005-09-12 14:44:20 -070012661 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012662 return 0;
12663}
12664
David S. Miller59e6b432005-05-18 22:50:10 -070012665#define BOUNDARY_SINGLE_CACHELINE 1
12666#define BOUNDARY_MULTI_CACHELINE 2
12667
12668static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12669{
12670 int cacheline_size;
12671 u8 byte;
12672 int goal;
12673
12674 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12675 if (byte == 0)
12676 cacheline_size = 1024;
12677 else
12678 cacheline_size = (int) byte * 4;
12679
12680 /* On 5703 and later chips, the boundary bits have no
12681 * effect.
12682 */
12683 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12685 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12686 goto out;
12687
12688#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12689 goal = BOUNDARY_MULTI_CACHELINE;
12690#else
12691#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12692 goal = BOUNDARY_SINGLE_CACHELINE;
12693#else
12694 goal = 0;
12695#endif
12696#endif
12697
12698 if (!goal)
12699 goto out;
12700
12701 /* PCI controllers on most RISC systems tend to disconnect
12702 * when a device tries to burst across a cache-line boundary.
12703 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12704 *
12705 * Unfortunately, for PCI-E there are only limited
12706 * write-side controls for this, and thus for reads
12707 * we will still get the disconnects. We'll also waste
12708 * these PCI cycles for both read and write for chips
12709	 * other than 5700 and 5701, which do not implement the
12710 * boundary bits.
12711 */
12712 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12713 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12714 switch (cacheline_size) {
12715 case 16:
12716 case 32:
12717 case 64:
12718 case 128:
12719 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12720 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12721 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12722 } else {
12723 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12724 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12725 }
12726 break;
12727
12728 case 256:
12729 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12730 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12731 break;
12732
12733 default:
12734 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12735 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12736 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012737 }
David S. Miller59e6b432005-05-18 22:50:10 -070012738 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12739 switch (cacheline_size) {
12740 case 16:
12741 case 32:
12742 case 64:
12743 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12744 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12745 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12746 break;
12747 }
12748 /* fallthrough */
12749 case 128:
12750 default:
12751 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12752 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12753 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012754 }
David S. Miller59e6b432005-05-18 22:50:10 -070012755 } else {
12756 switch (cacheline_size) {
12757 case 16:
12758 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12759 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12760 DMA_RWCTRL_WRITE_BNDRY_16);
12761 break;
12762 }
12763 /* fallthrough */
12764 case 32:
12765 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12766 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12767 DMA_RWCTRL_WRITE_BNDRY_32);
12768 break;
12769 }
12770 /* fallthrough */
12771 case 64:
12772 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12773 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12774 DMA_RWCTRL_WRITE_BNDRY_64);
12775 break;
12776 }
12777 /* fallthrough */
12778 case 128:
12779 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12780 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12781 DMA_RWCTRL_WRITE_BNDRY_128);
12782 break;
12783 }
12784 /* fallthrough */
12785 case 256:
12786 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12787 DMA_RWCTRL_WRITE_BNDRY_256);
12788 break;
12789 case 512:
12790 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12791 DMA_RWCTRL_WRITE_BNDRY_512);
12792 break;
12793 case 1024:
12794 default:
12795 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12796 DMA_RWCTRL_WRITE_BNDRY_1024);
12797 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012798 }
David S. Miller59e6b432005-05-18 22:50:10 -070012799 }
12800
12801out:
12802 return val;
12803}
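/*
 * Example of how tg3_calc_dma_bndry() resolves (values from the code
 * above): on a conventional PCI bus with a 64-byte cache line and
 * goal == BOUNDARY_SINGLE_CACHELINE, the result is
 * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, i.e. bursts
 * stop at 64-byte boundaries; with goal == 0 (the non-RISC default)
 * the value passed in is returned untouched and the chip may burst
 * across cache lines freely.
 */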
12804
Linus Torvalds1da177e2005-04-16 15:20:36 -070012805static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12806{
12807 struct tg3_internal_buffer_desc test_desc;
12808 u32 sram_dma_descs;
12809 int i, ret;
12810
12811 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12812
12813 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12814 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12815 tw32(RDMAC_STATUS, 0);
12816 tw32(WDMAC_STATUS, 0);
12817
12818 tw32(BUFMGR_MODE, 0);
12819 tw32(FTQ_RESET, 0);
12820
12821 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12822 test_desc.addr_lo = buf_dma & 0xffffffff;
12823 test_desc.nic_mbuf = 0x00002100;
12824 test_desc.len = size;
12825
12826 /*
12827	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12828 * the *second* time the tg3 driver was getting loaded after an
12829 * initial scan.
12830 *
12831 * Broadcom tells me:
12832 * ...the DMA engine is connected to the GRC block and a DMA
12833 * reset may affect the GRC block in some unpredictable way...
12834 * The behavior of resets to individual blocks has not been tested.
12835 *
12836 * Broadcom noted the GRC reset will also reset all sub-components.
12837 */
12838 if (to_device) {
12839 test_desc.cqid_sqid = (13 << 8) | 2;
12840
12841 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12842 udelay(40);
12843 } else {
12844 test_desc.cqid_sqid = (16 << 8) | 7;
12845
12846 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12847 udelay(40);
12848 }
12849 test_desc.flags = 0x00000005;
12850
12851 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12852 u32 val;
12853
12854 val = *(((u32 *)&test_desc) + i);
12855 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12856 sram_dma_descs + (i * sizeof(u32)));
12857 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12858 }
12859 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12860
12861 if (to_device) {
12862 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12863 } else {
12864 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12865 }
12866
12867 ret = -ENODEV;
12868 for (i = 0; i < 40; i++) {
12869 u32 val;
12870
12871 if (to_device)
12872 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12873 else
12874 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12875 if ((val & 0xffff) == sram_dma_descs) {
12876 ret = 0;
12877 break;
12878 }
12879
12880 udelay(100);
12881 }
12882
12883 return ret;
12884}
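/*
 * Usage note: tg3_test_dma() below calls this helper twice per pass,
 * first with to_device == 1 to DMA the test pattern from host memory
 * into NIC SRAM at offset 0x2100, then with to_device == 0 to pull it
 * back out, and compares the round-tripped buffer with the original.
 */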
12885
David S. Millerded73402005-05-23 13:59:47 -070012886#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012887
12888static int __devinit tg3_test_dma(struct tg3 *tp)
12889{
12890 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012891 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012892 int ret;
12893
12894 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12895 if (!buf) {
12896 ret = -ENOMEM;
12897 goto out_nofree;
12898 }
12899
12900 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12901 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12902
David S. Miller59e6b432005-05-18 22:50:10 -070012903 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012904
12905 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12906 /* DMA read watermark not used on PCIE */
12907 tp->dma_rwctrl |= 0x00180000;
12908 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012911 tp->dma_rwctrl |= 0x003f0000;
12912 else
12913 tp->dma_rwctrl |= 0x003f000f;
12914 } else {
12915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12917 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080012918 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012919
Michael Chan4a29cc22006-03-19 13:21:12 -080012920 /* If the 5704 is behind the EPB bridge, we can
12921 * do the less restrictive ONE_DMA workaround for
12922 * better performance.
12923 */
12924 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12926 tp->dma_rwctrl |= 0x8000;
12927 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012928 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12929
Michael Chan49afdeb2007-02-13 12:17:03 -080012930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12931 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070012932 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080012933 tp->dma_rwctrl |=
12934 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12935 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12936 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070012937 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12938 /* 5780 always in PCIX mode */
12939 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070012940 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12941 /* 5714 always in PCIX mode */
12942 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012943 } else {
12944 tp->dma_rwctrl |= 0x001b000f;
12945 }
12946 }
12947
12948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12950 tp->dma_rwctrl &= 0xfffffff0;
12951
12952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12954 /* Remove this if it causes problems for some boards. */
12955 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12956
12957 /* On 5700/5701 chips, we need to set this bit.
12958 * Otherwise the chip will issue cacheline transactions
12959 * to streamable DMA memory with not all the byte
12960 * enables turned on. This is an error on several
12961 * RISC PCI controllers, in particular sparc64.
12962 *
12963 * On 5703/5704 chips, this bit has been reassigned
12964 * a different meaning. In particular, it is used
12965 * on those chips to enable a PCI-X workaround.
12966 */
12967 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12968 }
12969
12970 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12971
12972#if 0
12973 /* Unneeded, already done by tg3_get_invariants. */
12974 tg3_switch_clocks(tp);
12975#endif
12976
12977 ret = 0;
12978 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12979 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12980 goto out;
12981
David S. Miller59e6b432005-05-18 22:50:10 -070012982 /* It is best to perform DMA test with maximum write burst size
12983 * to expose the 5700/5701 write DMA bug.
12984 */
12985 saved_dma_rwctrl = tp->dma_rwctrl;
12986 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12987 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12988
Linus Torvalds1da177e2005-04-16 15:20:36 -070012989 while (1) {
12990 u32 *p = buf, i;
12991
12992 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12993 p[i] = i;
12994
12995 /* Send the buffer to the chip. */
12996 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12997 if (ret) {
12998 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12999 break;
13000 }
13001
13002#if 0
13003 /* validate data reached card RAM correctly. */
13004 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13005 u32 val;
13006 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13007 if (le32_to_cpu(val) != p[i]) {
13008 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13009 /* ret = -ENODEV here? */
13010 }
13011 p[i] = 0;
13012 }
13013#endif
13014 /* Now read it back. */
13015 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13016 if (ret) {
13017 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13018
13019 break;
13020 }
13021
13022 /* Verify it. */
13023 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13024 if (p[i] == i)
13025 continue;
13026
David S. Miller59e6b432005-05-18 22:50:10 -070013027 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13028 DMA_RWCTRL_WRITE_BNDRY_16) {
13029 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013030 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13031 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13032 break;
13033 } else {
13034 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13035 ret = -ENODEV;
13036 goto out;
13037 }
13038 }
13039
13040 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13041 /* Success. */
13042 ret = 0;
13043 break;
13044 }
13045 }
David S. Miller59e6b432005-05-18 22:50:10 -070013046 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13047 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013048 static struct pci_device_id dma_wait_state_chipsets[] = {
13049 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13050 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13051 { },
13052 };
13053
David S. Miller59e6b432005-05-18 22:50:10 -070013054		/* DMA test passed without adjusting the DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013055 * now look for chipsets that are known to expose the
13056 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013057 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013058 if (pci_dev_present(dma_wait_state_chipsets)) {
13059 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13060 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13061		} else {
13062			/* Safe to use the calculated DMA boundary. */
13063			tp->dma_rwctrl = saved_dma_rwctrl;
13064		}
13065
David S. Miller59e6b432005-05-18 22:50:10 -070013066 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13067 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013068
13069out:
13070 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13071out_nofree:
13072 return ret;
13073}
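/*
 * Summary of the outcomes above: on 5700/5701 the write boundary is
 * dropped to 16 bytes as soon as a single mismatch shows up; if the
 * pattern survives with the unrestricted setting, the calculated
 * boundary is kept unless the host bridge (e.g. Apple UniNorth PCI15)
 * is on the known-bad list, in which case 16 bytes is forced anyway.
 */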
13074
13075static void __devinit tg3_init_link_config(struct tg3 *tp)
13076{
13077 tp->link_config.advertising =
13078 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13079 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13080 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13081 ADVERTISED_Autoneg | ADVERTISED_MII);
13082 tp->link_config.speed = SPEED_INVALID;
13083 tp->link_config.duplex = DUPLEX_INVALID;
13084 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013085 tp->link_config.active_speed = SPEED_INVALID;
13086 tp->link_config.active_duplex = DUPLEX_INVALID;
13087 tp->link_config.phy_is_low_power = 0;
13088 tp->link_config.orig_speed = SPEED_INVALID;
13089 tp->link_config.orig_duplex = DUPLEX_INVALID;
13090 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13091}
13092
13093static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13094{
Michael Chanfdfec172005-07-25 12:31:48 -070013095 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13096 tp->bufmgr_config.mbuf_read_dma_low_water =
13097 DEFAULT_MB_RDMA_LOW_WATER_5705;
13098 tp->bufmgr_config.mbuf_mac_rx_low_water =
13099 DEFAULT_MB_MACRX_LOW_WATER_5705;
13100 tp->bufmgr_config.mbuf_high_water =
13101 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013102 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13103 tp->bufmgr_config.mbuf_mac_rx_low_water =
13104 DEFAULT_MB_MACRX_LOW_WATER_5906;
13105 tp->bufmgr_config.mbuf_high_water =
13106 DEFAULT_MB_HIGH_WATER_5906;
13107 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013108
Michael Chanfdfec172005-07-25 12:31:48 -070013109 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13110 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13111 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13112 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13113 tp->bufmgr_config.mbuf_high_water_jumbo =
13114 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13115 } else {
13116 tp->bufmgr_config.mbuf_read_dma_low_water =
13117 DEFAULT_MB_RDMA_LOW_WATER;
13118 tp->bufmgr_config.mbuf_mac_rx_low_water =
13119 DEFAULT_MB_MACRX_LOW_WATER;
13120 tp->bufmgr_config.mbuf_high_water =
13121 DEFAULT_MB_HIGH_WATER;
13122
13123 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13124 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13125 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13126 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13127 tp->bufmgr_config.mbuf_high_water_jumbo =
13128 DEFAULT_MB_HIGH_WATER_JUMBO;
13129 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013130
13131 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13132 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13133}
13134
13135static char * __devinit tg3_phy_string(struct tg3 *tp)
13136{
13137 switch (tp->phy_id & PHY_ID_MASK) {
13138 case PHY_ID_BCM5400: return "5400";
13139 case PHY_ID_BCM5401: return "5401";
13140 case PHY_ID_BCM5411: return "5411";
13141 case PHY_ID_BCM5701: return "5701";
13142 case PHY_ID_BCM5703: return "5703";
13143 case PHY_ID_BCM5704: return "5704";
13144 case PHY_ID_BCM5705: return "5705";
13145 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013146 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013147 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013148 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013149 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013150 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013151 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013152 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013153 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013154 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013155 case PHY_ID_BCM8002: return "8002/serdes";
13156 case 0: return "serdes";
13157 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013159}
13160
Michael Chanf9804dd2005-09-27 12:13:10 -070013161static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13162{
13163 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13164 strcpy(str, "PCI Express");
13165 return str;
13166 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13167 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13168
13169 strcpy(str, "PCIX:");
13170
13171 if ((clock_ctrl == 7) ||
13172 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13173 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13174 strcat(str, "133MHz");
13175 else if (clock_ctrl == 0)
13176 strcat(str, "33MHz");
13177 else if (clock_ctrl == 2)
13178 strcat(str, "50MHz");
13179 else if (clock_ctrl == 4)
13180 strcat(str, "66MHz");
13181 else if (clock_ctrl == 6)
13182 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013183 } else {
13184 strcpy(str, "PCI:");
13185 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13186 strcat(str, "66MHz");
13187 else
13188 strcat(str, "33MHz");
13189 }
13190 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13191 strcat(str, ":32-bit");
13192 else
13193 strcat(str, ":64-bit");
13194 return str;
13195}
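/*
 * Example return values from tg3_bus_string() (buffer contents only):
 * "PCI Express", "PCIX:133MHz:64-bit", "PCI:66MHz:32-bit".  The caller
 * in tg3_init_one() passes a char[40], which comfortably holds the
 * longest of these.
 */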
13196
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013197static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013198{
13199 struct pci_dev *peer;
13200 unsigned int func, devnr = tp->pdev->devfn & ~7;
13201
13202 for (func = 0; func < 8; func++) {
13203 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13204 if (peer && peer != tp->pdev)
13205 break;
13206 pci_dev_put(peer);
13207 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013208	/* 5704 can be configured in single-port mode; set peer to
13209 * tp->pdev in that case.
13210 */
13211 if (!peer) {
13212 peer = tp->pdev;
13213 return peer;
13214 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013215
13216 /*
13217 * We don't need to keep the refcount elevated; there's no way
13218 * to remove one half of this device without removing the other
13219 */
13220 pci_dev_put(peer);
13221
13222 return peer;
13223}
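/*
 * devfn example for the lookup above: a dual-port 5704 at 02:04.0 and
 * 02:04.1 has devfn values 0x20 and 0x21; masking with ~7 gives the
 * slot base 0x20, so scanning functions 0-7 of that slot finds the
 * other port.  (The bus/slot numbers are only an example.)
 */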
13224
David S. Miller15f98502005-05-18 22:49:26 -070013225static void __devinit tg3_init_coal(struct tg3 *tp)
13226{
13227 struct ethtool_coalesce *ec = &tp->coal;
13228
13229 memset(ec, 0, sizeof(*ec));
13230 ec->cmd = ETHTOOL_GCOALESCE;
13231 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13232 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13233 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13234 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13235 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13236 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13237 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13238 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13239 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13240
13241 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13242 HOSTCC_MODE_CLRTICK_TXBD)) {
13243 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13244 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13245 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13246 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13247 }
Michael Chand244c892005-07-05 14:42:33 -070013248
13249 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13250 ec->rx_coalesce_usecs_irq = 0;
13251 ec->tx_coalesce_usecs_irq = 0;
13252 ec->stats_block_coalesce_usecs = 0;
13253 }
David S. Miller15f98502005-05-18 22:49:26 -070013254}
13255
Linus Torvalds1da177e2005-04-16 15:20:36 -070013256static int __devinit tg3_init_one(struct pci_dev *pdev,
13257 const struct pci_device_id *ent)
13258{
13259 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013260 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013261 struct net_device *dev;
13262 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013263 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013264 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013265 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013266
13267 if (tg3_version_printed++ == 0)
13268 printk(KERN_INFO "%s", version);
13269
13270 err = pci_enable_device(pdev);
13271 if (err) {
13272 printk(KERN_ERR PFX "Cannot enable PCI device, "
13273 "aborting.\n");
13274 return err;
13275 }
13276
Matt Carlson63532392008-11-03 16:49:57 -080013277 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013278 printk(KERN_ERR PFX "Cannot find proper PCI device "
13279 "base address, aborting.\n");
13280 err = -ENODEV;
13281 goto err_out_disable_pdev;
13282 }
13283
13284 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13285 if (err) {
13286 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13287 "aborting.\n");
13288 goto err_out_disable_pdev;
13289 }
13290
13291 pci_set_master(pdev);
13292
13293 /* Find power-management capability. */
13294 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13295 if (pm_cap == 0) {
13296 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13297 "aborting.\n");
13298 err = -EIO;
13299 goto err_out_free_res;
13300 }
13301
Linus Torvalds1da177e2005-04-16 15:20:36 -070013302 dev = alloc_etherdev(sizeof(*tp));
13303 if (!dev) {
13304 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13305 err = -ENOMEM;
13306 goto err_out_free_res;
13307 }
13308
Linus Torvalds1da177e2005-04-16 15:20:36 -070013309 SET_NETDEV_DEV(dev, &pdev->dev);
13310
Linus Torvalds1da177e2005-04-16 15:20:36 -070013311#if TG3_VLAN_TAG_USED
13312 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13313 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013314#endif
13315
13316 tp = netdev_priv(dev);
13317 tp->pdev = pdev;
13318 tp->dev = dev;
13319 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013320 tp->rx_mode = TG3_DEF_RX_MODE;
13321 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013322
Linus Torvalds1da177e2005-04-16 15:20:36 -070013323 if (tg3_debug > 0)
13324 tp->msg_enable = tg3_debug;
13325 else
13326 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13327
13328 /* The word/byte swap controls here control register access byte
13329 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13330 * setting below.
13331 */
13332 tp->misc_host_ctrl =
13333 MISC_HOST_CTRL_MASK_PCI_INT |
13334 MISC_HOST_CTRL_WORD_SWAP |
13335 MISC_HOST_CTRL_INDIR_ACCESS |
13336 MISC_HOST_CTRL_PCISTATE_RW;
13337
13338 /* The NONFRM (non-frame) byte/word swap controls take effect
13339 * on descriptor entries, anything which isn't packet data.
13340 *
13341 * The StrongARM chips on the board (one for tx, one for rx)
13342 * are running in big-endian mode.
13343 */
13344 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13345 GRC_MODE_WSWAP_NONFRM_DATA);
13346#ifdef __BIG_ENDIAN
13347 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13348#endif
13349 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013350 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013351 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013352
Matt Carlson63532392008-11-03 16:49:57 -080013353 dev->mem_start = pci_resource_start(pdev, BAR_0);
13354 tg3reg_len = pci_resource_len(pdev, BAR_0);
13355 dev->mem_end = dev->mem_start + tg3reg_len;
13356
13357 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013358 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013359 printk(KERN_ERR PFX "Cannot map device registers, "
13360 "aborting.\n");
13361 err = -ENOMEM;
13362 goto err_out_free_dev;
13363 }
13364
13365 tg3_init_link_config(tp);
13366
Linus Torvalds1da177e2005-04-16 15:20:36 -070013367 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13368 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13369 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13370
13371 dev->open = tg3_open;
13372 dev->stop = tg3_close;
13373 dev->get_stats = tg3_get_stats;
13374 dev->set_multicast_list = tg3_set_rx_mode;
13375 dev->set_mac_address = tg3_set_mac_addr;
13376 dev->do_ioctl = tg3_ioctl;
13377 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013378 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013379 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013380 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13381 dev->change_mtu = tg3_change_mtu;
13382 dev->irq = pdev->irq;
13383#ifdef CONFIG_NET_POLL_CONTROLLER
13384 dev->poll_controller = tg3_poll_controller;
13385#endif
13386
13387 err = tg3_get_invariants(tp);
13388 if (err) {
13389 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13390 "aborting.\n");
13391 goto err_out_iounmap;
13392 }
13393
Michael Chan4a29cc22006-03-19 13:21:12 -080013394 /* The EPB bridge inside 5714, 5715, and 5780 and any
13395 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013396 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13397 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13398 * do DMA address check in tg3_start_xmit().
13399 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013400 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13401 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13402 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013403 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13404#ifdef CONFIG_HIGHMEM
13405 dma_mask = DMA_64BIT_MASK;
13406#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013407 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013408 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13409
13410 /* Configure DMA attributes. */
13411 if (dma_mask > DMA_32BIT_MASK) {
13412 err = pci_set_dma_mask(pdev, dma_mask);
13413 if (!err) {
13414 dev->features |= NETIF_F_HIGHDMA;
13415 err = pci_set_consistent_dma_mask(pdev,
13416 persist_dma_mask);
13417 if (err < 0) {
13418 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13419 "DMA for consistent allocations\n");
13420 goto err_out_iounmap;
13421 }
13422 }
13423 }
13424 if (err || dma_mask == DMA_32BIT_MASK) {
13425 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13426 if (err) {
13427 printk(KERN_ERR PFX "No usable DMA configuration, "
13428 "aborting.\n");
13429 goto err_out_iounmap;
13430 }
13431 }
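	/*
	 * How the masks above resolve: a 5788-class chip is pinned to
	 * 32-bit DMA; a chip with the 40-bit DMA bug gets a 40-bit
	 * persistent mask (and, under CONFIG_HIGHMEM, a 64-bit streaming
	 * mask, relying on the address check in tg3_start_xmit() noted
	 * above); everything else asks for full 64-bit masks and falls
	 * back to 32-bit if the platform rejects them.
	 */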
13432
Michael Chanfdfec172005-07-25 12:31:48 -070013433 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013434
Linus Torvalds1da177e2005-04-16 15:20:36 -070013435 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13436 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13437 }
13438 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13439 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13440 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013442 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13443 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13444 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013445 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013446 }
13447
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013448 /* TSO is on by default on chips that support hardware TSO.
13449 * Firmware TSO on older chips gives lower performance, so it
13450 * is off by default, but can be enabled using ethtool.
13451 */
Michael Chanb0026622006-07-03 19:42:14 -070013452 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013453 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013454 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13455 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013456 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013457 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13458 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13459 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013461 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013462 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013463
Linus Torvalds1da177e2005-04-16 15:20:36 -070013464
13465 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13466 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13467 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13468 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13469 tp->rx_pending = 63;
13470 }
13471
Linus Torvalds1da177e2005-04-16 15:20:36 -070013472 err = tg3_get_device_address(tp);
13473 if (err) {
13474 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13475 "aborting.\n");
13476 goto err_out_iounmap;
13477 }
13478
Matt Carlson0d3031d2007-10-10 18:02:43 -070013479 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013480 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013481 printk(KERN_ERR PFX "Cannot find proper PCI device "
13482 "base address for APE, aborting.\n");
13483 err = -ENODEV;
13484 goto err_out_iounmap;
13485 }
13486
Matt Carlson63532392008-11-03 16:49:57 -080013487 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013488 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013489 printk(KERN_ERR PFX "Cannot map APE registers, "
13490 "aborting.\n");
13491 err = -ENOMEM;
13492 goto err_out_iounmap;
13493 }
13494
13495 tg3_ape_lock_init(tp);
13496 }
13497
Matt Carlsonc88864d2007-11-12 21:07:01 -080013498 /*
13499	 * Reset chip in case UNDI or EFI driver did not shut down
13500	 * DMA.  The DMA self test will enable WDMAC and we'll see (spurious)
13501 * pending DMA on the PCI bus at that point.
13502 */
13503 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13504 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13505 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13506 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13507 }
13508
13509 err = tg3_test_dma(tp);
13510 if (err) {
13511 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13512 goto err_out_apeunmap;
13513 }
13514
13515 /* Tigon3 can do ipv4 only... and some chips have buggy
13516 * checksumming.
13517 */
13518 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13519 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013525 dev->features |= NETIF_F_IPV6_CSUM;
13526
13527 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13528 } else
13529 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13530
13531 /* flow control autonegotiation is default behavior */
13532 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013533 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013534
13535 tg3_init_coal(tp);
13536
Michael Chanc49a1562006-12-17 17:07:29 -080013537 pci_set_drvdata(pdev, dev);
13538
Linus Torvalds1da177e2005-04-16 15:20:36 -070013539 err = register_netdev(dev);
13540 if (err) {
13541 printk(KERN_ERR PFX "Cannot register net device, "
13542 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013543 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013544 }
13545
Matt Carlsondf59c942008-11-03 16:52:56 -080013546 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013547 dev->name,
13548 tp->board_part_number,
13549 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013550 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013551 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013552
Matt Carlsondf59c942008-11-03 16:52:56 -080013553 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13554 printk(KERN_INFO
13555 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13556 tp->dev->name,
13557 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13558 tp->mdio_bus->phy_map[PHY_ADDR]->dev.bus_id);
13559 else
13560 printk(KERN_INFO
13561 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13562 tp->dev->name, tg3_phy_string(tp),
13563 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13564 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13565 "10/100/1000Base-T")),
13566 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13567
13568 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013569 dev->name,
13570 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13571 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13572 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13573 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013574 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013575 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13576 dev->name, tp->dma_rwctrl,
13577 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13578 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013579
13580 return 0;
13581
Matt Carlson0d3031d2007-10-10 18:02:43 -070013582err_out_apeunmap:
13583 if (tp->aperegs) {
13584 iounmap(tp->aperegs);
13585 tp->aperegs = NULL;
13586 }
13587
Linus Torvalds1da177e2005-04-16 15:20:36 -070013588err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013589 if (tp->regs) {
13590 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013591 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013593
13594err_out_free_dev:
13595 free_netdev(dev);
13596
13597err_out_free_res:
13598 pci_release_regions(pdev);
13599
13600err_out_disable_pdev:
13601 pci_disable_device(pdev);
13602 pci_set_drvdata(pdev, NULL);
13603 return err;
13604}
13605
13606static void __devexit tg3_remove_one(struct pci_dev *pdev)
13607{
13608 struct net_device *dev = pci_get_drvdata(pdev);
13609
13610 if (dev) {
13611 struct tg3 *tp = netdev_priv(dev);
13612
Michael Chan7faa0062006-02-02 17:29:28 -080013613 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013614
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013615 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13616 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013617 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013618 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013619
Linus Torvalds1da177e2005-04-16 15:20:36 -070013620 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013621 if (tp->aperegs) {
13622 iounmap(tp->aperegs);
13623 tp->aperegs = NULL;
13624 }
Michael Chan68929142005-08-09 20:17:14 -070013625 if (tp->regs) {
13626 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013627 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013629 free_netdev(dev);
13630 pci_release_regions(pdev);
13631 pci_disable_device(pdev);
13632 pci_set_drvdata(pdev, NULL);
13633 }
13634}
13635
13636static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13637{
13638 struct net_device *dev = pci_get_drvdata(pdev);
13639 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013640 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013641 int err;
13642
Michael Chan3e0c95f2007-08-03 20:56:54 -070013643 /* PCI register 4 needs to be saved whether netif_running() or not.
13644 * MSI address and data need to be saved if using MSI and
13645 * netif_running().
13646 */
13647 pci_save_state(pdev);
13648
Linus Torvalds1da177e2005-04-16 15:20:36 -070013649 if (!netif_running(dev))
13650 return 0;
13651
Michael Chan7faa0062006-02-02 17:29:28 -080013652 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013653 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013654 tg3_netif_stop(tp);
13655
13656 del_timer_sync(&tp->timer);
13657
David S. Millerf47c11e2005-06-24 20:18:35 -070013658 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013659 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013660 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013661
13662 netif_device_detach(dev);
13663
David S. Millerf47c11e2005-06-24 20:18:35 -070013664 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013665 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013666 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013667 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013668
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013669 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13670
13671 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013672 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013673 int err2;
13674
David S. Millerf47c11e2005-06-24 20:18:35 -070013675 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013676
Michael Chan6a9eba12005-12-13 21:08:58 -080013677 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013678 err2 = tg3_restart_hw(tp, 1);
13679 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013680 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013681
13682 tp->timer.expires = jiffies + tp->timer_offset;
13683 add_timer(&tp->timer);
13684
13685 netif_device_attach(dev);
13686 tg3_netif_start(tp);
13687
Michael Chanb9ec6c12006-07-25 16:37:27 -070013688out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013689 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013690
13691 if (!err2)
13692 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013693 }
13694
13695 return err;
13696}
13697
13698static int tg3_resume(struct pci_dev *pdev)
13699{
13700 struct net_device *dev = pci_get_drvdata(pdev);
13701 struct tg3 *tp = netdev_priv(dev);
13702 int err;
13703
Michael Chan3e0c95f2007-08-03 20:56:54 -070013704 pci_restore_state(tp->pdev);
13705
Linus Torvalds1da177e2005-04-16 15:20:36 -070013706 if (!netif_running(dev))
13707 return 0;
13708
Michael Chanbc1c7562006-03-20 17:48:03 -080013709 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013710 if (err)
13711 return err;
13712
13713 netif_device_attach(dev);
13714
David S. Millerf47c11e2005-06-24 20:18:35 -070013715 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013716
Michael Chan6a9eba12005-12-13 21:08:58 -080013717 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013718 err = tg3_restart_hw(tp, 1);
13719 if (err)
13720 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013721
13722 tp->timer.expires = jiffies + tp->timer_offset;
13723 add_timer(&tp->timer);
13724
Linus Torvalds1da177e2005-04-16 15:20:36 -070013725 tg3_netif_start(tp);
13726
Michael Chanb9ec6c12006-07-25 16:37:27 -070013727out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013728 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013729
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013730 if (!err)
13731 tg3_phy_start(tp);
13732
Michael Chanb9ec6c12006-07-25 16:37:27 -070013733 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013734}
13735
13736static struct pci_driver tg3_driver = {
13737 .name = DRV_MODULE_NAME,
13738 .id_table = tg3_pci_tbl,
13739 .probe = tg3_init_one,
13740 .remove = __devexit_p(tg3_remove_one),
13741 .suspend = tg3_suspend,
13742 .resume = tg3_resume
13743};
13744
13745static int __init tg3_init(void)
13746{
Jeff Garzik29917622006-08-19 17:48:59 -040013747 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013748}
13749
13750static void __exit tg3_cleanup(void)
13751{
13752 pci_unregister_driver(&tg3_driver);
13753}
13754
13755module_init(tg3_init);
13756module_exit(tg3_cleanup);