blob: e05849ee9000fe93dab53ca869097f43c04af95f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
Michael Chan65610fb2007-02-13 12:18:46 -08007 * Copyright (C) 2005-2007 Broadcom Corporation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
9 * Firmware is:
Michael Chan49cabf42005-06-06 15:15:17 -070010 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020026#include <linux/in.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
Matt Carlson158d7ab2008-05-29 01:37:54 -070035#include <linux/phy.h>
Matt Carlsona9daf362008-05-25 23:49:44 -070036#include <linux/brcmphy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/if_vlan.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/workqueue.h>
Michael Chan61487482005-09-05 17:53:19 -070041#include <linux/prefetch.h>
Tobias Klauserf9a5f7d2005-10-29 15:09:26 +020042#include <linux/dma-mapping.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/checksum.h>
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -030045#include <net/ip.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
47#include <asm/system.h>
48#include <asm/io.h>
49#include <asm/byteorder.h>
50#include <asm/uaccess.h>
51
David S. Miller49b6e95f2007-03-29 01:38:42 -070052#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/idprom.h>
David S. Miller49b6e95f2007-03-29 01:38:42 -070054#include <asm/prom.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#endif
56
Matt Carlson63532392008-11-03 16:49:57 -080057#define BAR_0 0
58#define BAR_2 2
59
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61#define TG3_VLAN_TAG_USED 1
62#else
63#define TG3_VLAN_TAG_USED 0
64#endif
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#define TG3_TSO_SUPPORT 1
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
68#include "tg3.h"
69
70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": "
Matt Carlsonfa228b32008-11-03 16:58:53 -080072#define DRV_MODULE_VERSION "3.95"
73#define DRV_MODULE_RELDATE "November 3, 2008"
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0
77#define TG3_DEF_TX_MODE 0
78#define TG3_DEF_MSG_ENABLE \
79 (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | \
83 NETIF_MSG_IFDOWN | \
84 NETIF_MSG_IFUP | \
85 NETIF_MSG_RX_ERR | \
86 NETIF_MSG_TX_ERR)
87
88/* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
90 */
91#define TG3_TX_TIMEOUT (5 * HZ)
92
93/* hardware minimum and maximum for a single frame's data payload */
94#define TG3_MIN_MTU 60
95#define TG3_MAX_MTU(tp) \
Michael Chan0f893dc2005-07-25 12:30:38 -070096 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
98/* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
101 */
102#define TG3_RX_RING_SIZE 512
103#define TG3_DEF_RX_RING_PENDING 200
104#define TG3_RX_JUMBO_RING_SIZE 256
105#define TG3_DEF_RX_JUMBO_RING_PENDING 100
106
107/* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
112 */
113#define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115
116#define TG3_TX_RING_SIZE 512
117#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118
119#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RING_SIZE)
121#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 TG3_TX_RING_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
129#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131
132/* minimum number of free TX descriptors required to wake up TX process */
Ranjit Manomohan42952232006-10-18 20:54:26 -0700133#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
135/* number of ETHTOOL_GSTATS u64's */
136#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
Michael Chan4cafd3f2005-05-29 14:56:34 -0700138#define TG3_NUM_TEST 6
139
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140static char version[] __devinitdata =
141 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145MODULE_LICENSE("GPL");
146MODULE_VERSION(DRV_MODULE_VERSION);
147
148static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
149module_param(tg3_debug, int, 0);
150MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152static struct pci_device_id tg3_pci_tbl[] = {
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
Michael Chan126a3362006-09-27 16:03:07 -0700177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
Michael Chan126a3362006-09-27 16:03:07 -0700192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
Michael Chan676917d2006-12-07 00:20:22 -0800196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
Michael Chanb5d37722006-09-27 16:06:21 -0700204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
Matt Carlsond30cdd22007-10-07 23:28:35 -0700206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
Matt Carlson6c7af272007-10-21 16:12:02 -0700208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
Matt Carlson9936bcf2007-10-10 18:03:07 -0700209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
Matt Carlsonc88e6682008-11-03 16:49:18 -0800211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
Matt Carlson57e69832008-05-25 23:48:31 -0700213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
Henrik Kretzschmar13185212006-08-22 00:28:33 -0700214 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221 {}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222};
223
224MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
Andreas Mohr50da8592006-08-14 23:54:30 -0700226static const struct {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 const char string[ETH_GSTRING_LEN];
228} ethtool_stats_keys[TG3_NUM_STATS] = {
229 { "rx_octets" },
230 { "rx_fragments" },
231 { "rx_ucast_packets" },
232 { "rx_mcast_packets" },
233 { "rx_bcast_packets" },
234 { "rx_fcs_errors" },
235 { "rx_align_errors" },
236 { "rx_xon_pause_rcvd" },
237 { "rx_xoff_pause_rcvd" },
238 { "rx_mac_ctrl_rcvd" },
239 { "rx_xoff_entered" },
240 { "rx_frame_too_long_errors" },
241 { "rx_jabbers" },
242 { "rx_undersize_packets" },
243 { "rx_in_length_errors" },
244 { "rx_out_length_errors" },
245 { "rx_64_or_less_octet_packets" },
246 { "rx_65_to_127_octet_packets" },
247 { "rx_128_to_255_octet_packets" },
248 { "rx_256_to_511_octet_packets" },
249 { "rx_512_to_1023_octet_packets" },
250 { "rx_1024_to_1522_octet_packets" },
251 { "rx_1523_to_2047_octet_packets" },
252 { "rx_2048_to_4095_octet_packets" },
253 { "rx_4096_to_8191_octet_packets" },
254 { "rx_8192_to_9022_octet_packets" },
255
256 { "tx_octets" },
257 { "tx_collisions" },
258
259 { "tx_xon_sent" },
260 { "tx_xoff_sent" },
261 { "tx_flow_control" },
262 { "tx_mac_errors" },
263 { "tx_single_collisions" },
264 { "tx_mult_collisions" },
265 { "tx_deferred" },
266 { "tx_excessive_collisions" },
267 { "tx_late_collisions" },
268 { "tx_collide_2times" },
269 { "tx_collide_3times" },
270 { "tx_collide_4times" },
271 { "tx_collide_5times" },
272 { "tx_collide_6times" },
273 { "tx_collide_7times" },
274 { "tx_collide_8times" },
275 { "tx_collide_9times" },
276 { "tx_collide_10times" },
277 { "tx_collide_11times" },
278 { "tx_collide_12times" },
279 { "tx_collide_13times" },
280 { "tx_collide_14times" },
281 { "tx_collide_15times" },
282 { "tx_ucast_packets" },
283 { "tx_mcast_packets" },
284 { "tx_bcast_packets" },
285 { "tx_carrier_sense_errors" },
286 { "tx_discards" },
287 { "tx_errors" },
288
289 { "dma_writeq_full" },
290 { "dma_write_prioq_full" },
291 { "rxbds_empty" },
292 { "rx_discards" },
293 { "rx_errors" },
294 { "rx_threshold_hit" },
295
296 { "dma_readq_full" },
297 { "dma_read_prioq_full" },
298 { "tx_comp_queue_full" },
299
300 { "ring_set_send_prod_index" },
301 { "ring_status_update" },
302 { "nic_irqs" },
303 { "nic_avoided_irqs" },
304 { "nic_tx_threshold_hit" }
305};
306
Andreas Mohr50da8592006-08-14 23:54:30 -0700307static const struct {
Michael Chan4cafd3f2005-05-29 14:56:34 -0700308 const char string[ETH_GSTRING_LEN];
309} ethtool_test_keys[TG3_NUM_TEST] = {
310 { "nvram test (online) " },
311 { "link test (online) " },
312 { "register test (offline)" },
313 { "memory test (offline)" },
314 { "loopback test (offline)" },
315 { "interrupt test (offline)" },
316};
317
Michael Chanb401e9e2005-12-19 16:27:04 -0800318static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
319{
320 writel(val, tp->regs + off);
321}
322
323static u32 tg3_read32(struct tg3 *tp, u32 off)
324{
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400325 return (readl(tp->regs + off));
Michael Chanb401e9e2005-12-19 16:27:04 -0800326}
327
Matt Carlson0d3031d2007-10-10 18:02:43 -0700328static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
329{
330 writel(val, tp->aperegs + off);
331}
332
333static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334{
335 return (readl(tp->aperegs + off));
336}
337
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339{
Michael Chan68929142005-08-09 20:17:14 -0700340 unsigned long flags;
341
342 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700343 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
Michael Chan68929142005-08-09 20:17:14 -0700345 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Michael Chan1ee582d2005-08-09 20:16:46 -0700346}
347
348static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
349{
350 writel(val, tp->regs + off);
351 readl(tp->regs + off);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352}
353
Michael Chan68929142005-08-09 20:17:14 -0700354static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
355{
356 unsigned long flags;
357 u32 val;
358
359 spin_lock_irqsave(&tp->indirect_lock, flags);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
362 spin_unlock_irqrestore(&tp->indirect_lock, flags);
363 return val;
364}
365
366static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
367{
368 unsigned long flags;
369
370 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
371 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
372 TG3_64BIT_REG_LOW, val);
373 return;
374 }
375 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
376 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
377 TG3_64BIT_REG_LOW, val);
378 return;
379 }
380
381 spin_lock_irqsave(&tp->indirect_lock, flags);
382 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
384 spin_unlock_irqrestore(&tp->indirect_lock, flags);
385
386 /* In indirect mode when disabling interrupts, we also need
387 * to clear the interrupt bit in the GRC local ctrl register.
388 */
389 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
390 (val == 0x1)) {
391 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
392 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
393 }
394}
395
396static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
397{
398 unsigned long flags;
399 u32 val;
400
401 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
403 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
404 spin_unlock_irqrestore(&tp->indirect_lock, flags);
405 return val;
406}
407
Michael Chanb401e9e2005-12-19 16:27:04 -0800408/* usec_wait specifies the wait time in usec when writing to certain registers
409 * where it is unsafe to read back the register without some delay.
410 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
411 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
412 */
413static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414{
Michael Chanb401e9e2005-12-19 16:27:04 -0800415 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
416 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
417 /* Non-posted methods */
418 tp->write32(tp, off, val);
419 else {
420 /* Posted method */
421 tg3_write32(tp, off, val);
422 if (usec_wait)
423 udelay(usec_wait);
424 tp->read32(tp, off);
425 }
426 /* Wait again after the read for the posted method to guarantee that
427 * the wait time is met.
428 */
429 if (usec_wait)
430 udelay(usec_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431}
432
Michael Chan09ee9292005-08-09 20:17:00 -0700433static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
434{
435 tp->write32_mbox(tp, off, val);
Michael Chan68929142005-08-09 20:17:14 -0700436 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
437 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438 tp->read32_mbox(tp, off);
Michael Chan09ee9292005-08-09 20:17:00 -0700439}
440
Michael Chan20094932005-08-09 20:16:32 -0700441static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442{
443 void __iomem *mbox = tp->regs + off;
444 writel(val, mbox);
445 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
446 writel(val, mbox);
447 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
448 readl(mbox);
449}
450
Michael Chanb5d37722006-09-27 16:06:21 -0700451static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452{
453 return (readl(tp->regs + off + GRCMBOX_BASE));
454}
455
456static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
457{
458 writel(val, tp->regs + off + GRCMBOX_BASE);
459}
460
Michael Chan20094932005-08-09 20:16:32 -0700461#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700462#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
Michael Chan20094932005-08-09 20:16:32 -0700463#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
464#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
Michael Chan09ee9292005-08-09 20:17:00 -0700465#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
Michael Chan20094932005-08-09 20:16:32 -0700466
467#define tw32(reg,val) tp->write32(tp, reg, val)
Michael Chanb401e9e2005-12-19 16:27:04 -0800468#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
469#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
Michael Chan20094932005-08-09 20:16:32 -0700470#define tr32(reg) tp->read32(tp, reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
472static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
473{
Michael Chan68929142005-08-09 20:17:14 -0700474 unsigned long flags;
475
Michael Chanb5d37722006-09-27 16:06:21 -0700476 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
477 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
478 return;
479
Michael Chan68929142005-08-09 20:17:14 -0700480 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chanbbadf502006-04-06 21:46:34 -0700481 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
482 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
483 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484
Michael Chanbbadf502006-04-06 21:46:34 -0700485 /* Always leave this as zero. */
486 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
487 } else {
488 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
489 tw32_f(TG3PCI_MEM_WIN_DATA, val);
490
491 /* Always leave this as zero. */
492 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
493 }
Michael Chan68929142005-08-09 20:17:14 -0700494 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495}
496
497static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
498{
Michael Chan68929142005-08-09 20:17:14 -0700499 unsigned long flags;
500
Michael Chanb5d37722006-09-27 16:06:21 -0700501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
502 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
503 *val = 0;
504 return;
505 }
506
Michael Chan68929142005-08-09 20:17:14 -0700507 spin_lock_irqsave(&tp->indirect_lock, flags);
Michael Chanbbadf502006-04-06 21:46:34 -0700508 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
509 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
510 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511
Michael Chanbbadf502006-04-06 21:46:34 -0700512 /* Always leave this as zero. */
513 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
514 } else {
515 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
516 *val = tr32(TG3PCI_MEM_WIN_DATA);
517
518 /* Always leave this as zero. */
519 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
520 }
Michael Chan68929142005-08-09 20:17:14 -0700521 spin_unlock_irqrestore(&tp->indirect_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522}
523
Matt Carlson0d3031d2007-10-10 18:02:43 -0700524static void tg3_ape_lock_init(struct tg3 *tp)
525{
526 int i;
527
528 /* Make sure the driver hasn't any stale locks. */
529 for (i = 0; i < 8; i++)
530 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531 APE_LOCK_GRANT_DRIVER);
532}
533
534static int tg3_ape_lock(struct tg3 *tp, int locknum)
535{
536 int i, off;
537 int ret = 0;
538 u32 status;
539
540 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541 return 0;
542
543 switch (locknum) {
Matt Carlson77b483f2008-08-15 14:07:24 -0700544 case TG3_APE_LOCK_GRC:
Matt Carlson0d3031d2007-10-10 18:02:43 -0700545 case TG3_APE_LOCK_MEM:
546 break;
547 default:
548 return -EINVAL;
549 }
550
551 off = 4 * locknum;
552
553 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555 /* Wait for up to 1 millisecond to acquire lock. */
556 for (i = 0; i < 100; i++) {
557 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558 if (status == APE_LOCK_GRANT_DRIVER)
559 break;
560 udelay(10);
561 }
562
563 if (status != APE_LOCK_GRANT_DRIVER) {
564 /* Revoke the lock request. */
565 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566 APE_LOCK_GRANT_DRIVER);
567
568 ret = -EBUSY;
569 }
570
571 return ret;
572}
573
574static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575{
576 int off;
577
578 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579 return;
580
581 switch (locknum) {
Matt Carlson77b483f2008-08-15 14:07:24 -0700582 case TG3_APE_LOCK_GRC:
Matt Carlson0d3031d2007-10-10 18:02:43 -0700583 case TG3_APE_LOCK_MEM:
584 break;
585 default:
586 return;
587 }
588
589 off = 4 * locknum;
590 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591}
592
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593static void tg3_disable_ints(struct tg3 *tp)
594{
595 tw32(TG3PCI_MISC_HOST_CTRL,
596 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700597 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598}
599
600static inline void tg3_cond_int(struct tg3 *tp)
601{
Michael Chan38f38432005-09-05 17:53:32 -0700602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603 (tp->hw_status->status & SD_STATUS_UPDATED))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
Michael Chanb5d37722006-09-27 16:06:21 -0700605 else
606 tw32(HOSTCC_MODE, tp->coalesce_mode |
607 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608}
609
610static void tg3_enable_ints(struct tg3 *tp)
611{
Michael Chanbbe832c2005-06-24 20:20:04 -0700612 tp->irq_sync = 0;
613 wmb();
614
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 tw32(TG3PCI_MISC_HOST_CTRL,
616 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
Michael Chan09ee9292005-08-09 20:17:00 -0700617 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
618 (tp->last_tag << 24));
Michael Chanfcfa0a32006-03-20 22:28:41 -0800619 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
620 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
621 (tp->last_tag << 24));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 tg3_cond_int(tp);
623}
624
Michael Chan04237dd2005-04-25 15:17:17 -0700625static inline unsigned int tg3_has_work(struct tg3 *tp)
626{
627 struct tg3_hw_status *sblk = tp->hw_status;
628 unsigned int work_exists = 0;
629
630 /* check for phy events */
631 if (!(tp->tg3_flags &
632 (TG3_FLAG_USE_LINKCHG_REG |
633 TG3_FLAG_POLL_SERDES))) {
634 if (sblk->status & SD_STATUS_LINK_CHG)
635 work_exists = 1;
636 }
637 /* check for RX/TX work to do */
638 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
640 work_exists = 1;
641
642 return work_exists;
643}
644
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645/* tg3_restart_ints
Michael Chan04237dd2005-04-25 15:17:17 -0700646 * similar to tg3_enable_ints, but it accurately determines whether there
647 * is new work pending and can return without flushing the PIO write
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400648 * which reenables interrupts
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 */
650static void tg3_restart_ints(struct tg3 *tp)
651{
David S. Millerfac9b832005-05-18 22:46:34 -0700652 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
653 tp->last_tag << 24);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 mmiowb();
655
David S. Millerfac9b832005-05-18 22:46:34 -0700656 /* When doing tagged status, this work check is unnecessary.
657 * The last_tag we write above tells the chip which piece of
658 * work we've completed.
659 */
660 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
661 tg3_has_work(tp))
Michael Chan04237dd2005-04-25 15:17:17 -0700662 tw32(HOSTCC_MODE, tp->coalesce_mode |
663 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664}
665
666static inline void tg3_netif_stop(struct tg3 *tp)
667{
Michael Chanbbe832c2005-06-24 20:20:04 -0700668 tp->dev->trans_start = jiffies; /* prevent tx timeout */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700669 napi_disable(&tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 netif_tx_disable(tp->dev);
671}
672
673static inline void tg3_netif_start(struct tg3 *tp)
674{
675 netif_wake_queue(tp->dev);
676 /* NOTE: unconditional netif_wake_queue is only appropriate
677 * so long as all callers are assured to have free tx slots
678 * (such as after tg3_init_hw)
679 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700680 napi_enable(&tp->napi);
David S. Millerf47c11e2005-06-24 20:18:35 -0700681 tp->hw_status->status |= SD_STATUS_UPDATED;
682 tg3_enable_ints(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683}
684
685static void tg3_switch_clocks(struct tg3 *tp)
686{
687 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
688 u32 orig_clock_ctrl;
689
Matt Carlson795d01c2007-10-07 23:28:17 -0700690 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
691 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan4cf78e42005-07-25 12:29:19 -0700692 return;
693
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 orig_clock_ctrl = clock_ctrl;
695 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
696 CLOCK_CTRL_CLKRUN_OENABLE |
697 0x1f);
698 tp->pci_clock_ctrl = clock_ctrl;
699
700 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
701 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
Michael Chanb401e9e2005-12-19 16:27:04 -0800702 tw32_wait_f(TG3PCI_CLOCK_CTRL,
703 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704 }
705 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
Michael Chanb401e9e2005-12-19 16:27:04 -0800706 tw32_wait_f(TG3PCI_CLOCK_CTRL,
707 clock_ctrl |
708 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
709 40);
710 tw32_wait_f(TG3PCI_CLOCK_CTRL,
711 clock_ctrl | (CLOCK_CTRL_ALTCLK),
712 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 }
Michael Chanb401e9e2005-12-19 16:27:04 -0800714 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715}
716
717#define PHY_BUSY_LOOPS 5000
718
719static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
720{
721 u32 frame_val;
722 unsigned int loops;
723 int ret;
724
725 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
726 tw32_f(MAC_MI_MODE,
727 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
728 udelay(80);
729 }
730
731 *val = 0x0;
732
733 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
734 MI_COM_PHY_ADDR_MASK);
735 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
736 MI_COM_REG_ADDR_MASK);
737 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400738
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 tw32_f(MAC_MI_COM, frame_val);
740
741 loops = PHY_BUSY_LOOPS;
742 while (loops != 0) {
743 udelay(10);
744 frame_val = tr32(MAC_MI_COM);
745
746 if ((frame_val & MI_COM_BUSY) == 0) {
747 udelay(5);
748 frame_val = tr32(MAC_MI_COM);
749 break;
750 }
751 loops -= 1;
752 }
753
754 ret = -EBUSY;
755 if (loops != 0) {
756 *val = frame_val & MI_COM_DATA_MASK;
757 ret = 0;
758 }
759
760 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
761 tw32_f(MAC_MI_MODE, tp->mi_mode);
762 udelay(80);
763 }
764
765 return ret;
766}
767
768static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
769{
770 u32 frame_val;
771 unsigned int loops;
772 int ret;
773
Michael Chanb5d37722006-09-27 16:06:21 -0700774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
775 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
776 return 0;
777
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
779 tw32_f(MAC_MI_MODE,
780 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
781 udelay(80);
782 }
783
784 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
785 MI_COM_PHY_ADDR_MASK);
786 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
787 MI_COM_REG_ADDR_MASK);
788 frame_val |= (val & MI_COM_DATA_MASK);
789 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400790
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 tw32_f(MAC_MI_COM, frame_val);
792
793 loops = PHY_BUSY_LOOPS;
794 while (loops != 0) {
795 udelay(10);
796 frame_val = tr32(MAC_MI_COM);
797 if ((frame_val & MI_COM_BUSY) == 0) {
798 udelay(5);
799 frame_val = tr32(MAC_MI_COM);
800 break;
801 }
802 loops -= 1;
803 }
804
805 ret = -EBUSY;
806 if (loops != 0)
807 ret = 0;
808
809 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
810 tw32_f(MAC_MI_MODE, tp->mi_mode);
811 udelay(80);
812 }
813
814 return ret;
815}
816
Matt Carlson95e28692008-05-25 23:44:14 -0700817static int tg3_bmcr_reset(struct tg3 *tp)
818{
819 u32 phy_control;
820 int limit, err;
821
822 /* OK, reset it, and poll the BMCR_RESET bit until it
823 * clears or we time out.
824 */
825 phy_control = BMCR_RESET;
826 err = tg3_writephy(tp, MII_BMCR, phy_control);
827 if (err != 0)
828 return -EBUSY;
829
830 limit = 5000;
831 while (limit--) {
832 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833 if (err != 0)
834 return -EBUSY;
835
836 if ((phy_control & BMCR_RESET) == 0) {
837 udelay(40);
838 break;
839 }
840 udelay(10);
841 }
842 if (limit <= 0)
843 return -EBUSY;
844
845 return 0;
846}
847
Matt Carlson158d7ab2008-05-29 01:37:54 -0700848static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849{
850 struct tg3 *tp = (struct tg3 *)bp->priv;
851 u32 val;
852
853 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854 return -EAGAIN;
855
856 if (tg3_readphy(tp, reg, &val))
857 return -EIO;
858
859 return val;
860}
861
862static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863{
864 struct tg3 *tp = (struct tg3 *)bp->priv;
865
866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867 return -EAGAIN;
868
869 if (tg3_writephy(tp, reg, val))
870 return -EIO;
871
872 return 0;
873}
874
875static int tg3_mdio_reset(struct mii_bus *bp)
876{
877 return 0;
878}
879
Matt Carlson9c61d6b2008-11-03 16:54:56 -0800880static void tg3_mdio_config_5785(struct tg3 *tp)
Matt Carlsona9daf362008-05-25 23:49:44 -0700881{
882 u32 val;
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800883 struct phy_device *phydev;
Matt Carlsona9daf362008-05-25 23:49:44 -0700884
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800885 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
886 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
887 case TG3_PHY_ID_BCM50610:
888 val = MAC_PHYCFG2_50610_LED_MODES;
889 break;
890 case TG3_PHY_ID_BCMAC131:
891 val = MAC_PHYCFG2_AC131_LED_MODES;
892 break;
893 case TG3_PHY_ID_RTL8211C:
894 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
895 break;
896 case TG3_PHY_ID_RTL8201E:
897 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
898 break;
899 default:
Matt Carlsona9daf362008-05-25 23:49:44 -0700900 return;
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800901 }
902
903 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
904 tw32(MAC_PHYCFG2, val);
905
906 val = tr32(MAC_PHYCFG1);
907 val &= ~MAC_PHYCFG1_RGMII_INT;
908 tw32(MAC_PHYCFG1, val);
909
910 return;
911 }
912
913 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
914 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
915 MAC_PHYCFG2_FMODE_MASK_MASK |
916 MAC_PHYCFG2_GMODE_MASK_MASK |
917 MAC_PHYCFG2_ACT_MASK_MASK |
918 MAC_PHYCFG2_QUAL_MASK_MASK |
919 MAC_PHYCFG2_INBAND_ENABLE;
920
921 tw32(MAC_PHYCFG2, val);
Matt Carlsona9daf362008-05-25 23:49:44 -0700922
923 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
924 MAC_PHYCFG1_RGMII_SND_STAT_EN);
925 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
926 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
927 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
928 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
929 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
930 }
931 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
932
Matt Carlsona9daf362008-05-25 23:49:44 -0700933 val = tr32(MAC_EXT_RGMII_MODE);
934 val &= ~(MAC_RGMII_MODE_RX_INT_B |
935 MAC_RGMII_MODE_RX_QUALITY |
936 MAC_RGMII_MODE_RX_ACTIVITY |
937 MAC_RGMII_MODE_RX_ENG_DET |
938 MAC_RGMII_MODE_TX_ENABLE |
939 MAC_RGMII_MODE_TX_LOWPWR |
940 MAC_RGMII_MODE_TX_RESET);
Matt Carlsonfcb389d2008-11-03 16:55:44 -0800941 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
Matt Carlsona9daf362008-05-25 23:49:44 -0700942 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
943 val |= MAC_RGMII_MODE_RX_INT_B |
944 MAC_RGMII_MODE_RX_QUALITY |
945 MAC_RGMII_MODE_RX_ACTIVITY |
946 MAC_RGMII_MODE_RX_ENG_DET;
947 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
948 val |= MAC_RGMII_MODE_TX_ENABLE |
949 MAC_RGMII_MODE_TX_LOWPWR |
950 MAC_RGMII_MODE_TX_RESET;
951 }
952 tw32(MAC_EXT_RGMII_MODE, val);
953}
954
Matt Carlson158d7ab2008-05-29 01:37:54 -0700955static void tg3_mdio_start(struct tg3 *tp)
956{
957 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700958 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700959 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700960 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700961 }
962
963 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
964 tw32_f(MAC_MI_MODE, tp->mi_mode);
965 udelay(80);
Matt Carlsona9daf362008-05-25 23:49:44 -0700966
Matt Carlson9c61d6b2008-11-03 16:54:56 -0800967 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
969 tg3_mdio_config_5785(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700970}
971
972static void tg3_mdio_stop(struct tg3 *tp)
973{
974 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700975 mutex_lock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700976 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700977 mutex_unlock(&tp->mdio_bus->mdio_lock);
Matt Carlson158d7ab2008-05-29 01:37:54 -0700978 }
979}
980
981static int tg3_mdio_init(struct tg3 *tp)
982{
983 int i;
984 u32 reg;
Matt Carlsona9daf362008-05-25 23:49:44 -0700985 struct phy_device *phydev;
Matt Carlson158d7ab2008-05-29 01:37:54 -0700986
987 tg3_mdio_start(tp);
988
989 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
990 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
991 return 0;
992
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700993 tp->mdio_bus = mdiobus_alloc();
994 if (tp->mdio_bus == NULL)
995 return -ENOMEM;
Matt Carlson158d7ab2008-05-29 01:37:54 -0700996
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700997 tp->mdio_bus->name = "tg3 mdio bus";
998 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
Matt Carlson158d7ab2008-05-29 01:37:54 -0700999 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001000 tp->mdio_bus->priv = tp;
1001 tp->mdio_bus->parent = &tp->pdev->dev;
1002 tp->mdio_bus->read = &tg3_mdio_read;
1003 tp->mdio_bus->write = &tg3_mdio_write;
1004 tp->mdio_bus->reset = &tg3_mdio_reset;
1005 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1006 tp->mdio_bus->irq = &tp->mdio_irq[0];
Matt Carlson158d7ab2008-05-29 01:37:54 -07001007
1008 for (i = 0; i < PHY_MAX_ADDR; i++)
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001009 tp->mdio_bus->irq[i] = PHY_POLL;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001010
1011 /* The bus registration will look for all the PHYs on the mdio bus.
1012 * Unfortunately, it does not ensure the PHY is powered up before
1013 * accessing the PHY ID registers. A chip reset is the
1014 * quickest way to bring the device back to an operational state..
1015 */
1016 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1017 tg3_bmcr_reset(tp);
1018
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001019 i = mdiobus_register(tp->mdio_bus);
Matt Carlsona9daf362008-05-25 23:49:44 -07001020 if (i) {
Matt Carlson158d7ab2008-05-29 01:37:54 -07001021 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1022 tp->dev->name, i);
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001023 mdiobus_free(tp->mdio_bus);
Matt Carlsona9daf362008-05-25 23:49:44 -07001024 return i;
1025 }
Matt Carlson158d7ab2008-05-29 01:37:54 -07001026
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001027 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsona9daf362008-05-25 23:49:44 -07001028
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001029 if (!phydev || !phydev->drv) {
1030 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1031 mdiobus_unregister(tp->mdio_bus);
1032 mdiobus_free(tp->mdio_bus);
1033 return -ENODEV;
1034 }
1035
1036 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
Matt Carlsona9daf362008-05-25 23:49:44 -07001037 case TG3_PHY_ID_BCM50610:
Matt Carlsona9daf362008-05-25 23:49:44 -07001038 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1039 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1040 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1041 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1042 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1043 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001044 /* fallthru */
1045 case TG3_PHY_ID_RTL8211C:
1046 phydev->interface = PHY_INTERFACE_MODE_RGMII;
Matt Carlsona9daf362008-05-25 23:49:44 -07001047 break;
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001048 case TG3_PHY_ID_RTL8201E:
Matt Carlsona9daf362008-05-25 23:49:44 -07001049 case TG3_PHY_ID_BCMAC131:
1050 phydev->interface = PHY_INTERFACE_MODE_MII;
1051 break;
1052 }
1053
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001054 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1055
1056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1057 tg3_mdio_config_5785(tp);
Matt Carlsona9daf362008-05-25 23:49:44 -07001058
1059 return 0;
Matt Carlson158d7ab2008-05-29 01:37:54 -07001060}
1061
1062static void tg3_mdio_fini(struct tg3 *tp)
1063{
1064 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1065 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001066 mdiobus_unregister(tp->mdio_bus);
1067 mdiobus_free(tp->mdio_bus);
Matt Carlson158d7ab2008-05-29 01:37:54 -07001068 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1069 }
1070}
1071
Matt Carlson95e28692008-05-25 23:44:14 -07001072/* tp->lock is held. */
Matt Carlson4ba526c2008-08-15 14:10:04 -07001073static inline void tg3_generate_fw_event(struct tg3 *tp)
1074{
1075 u32 val;
1076
1077 val = tr32(GRC_RX_CPU_EVENT);
1078 val |= GRC_RX_CPU_DRIVER_EVENT;
1079 tw32_f(GRC_RX_CPU_EVENT, val);
1080
1081 tp->last_event_jiffies = jiffies;
1082}
1083
1084#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1085
1086/* tp->lock is held. */
Matt Carlson95e28692008-05-25 23:44:14 -07001087static void tg3_wait_for_event_ack(struct tg3 *tp)
1088{
1089 int i;
Matt Carlson4ba526c2008-08-15 14:10:04 -07001090 unsigned int delay_cnt;
1091 long time_remain;
Matt Carlson95e28692008-05-25 23:44:14 -07001092
Matt Carlson4ba526c2008-08-15 14:10:04 -07001093 /* If enough time has passed, no wait is necessary. */
1094 time_remain = (long)(tp->last_event_jiffies + 1 +
1095 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1096 (long)jiffies;
1097 if (time_remain < 0)
1098 return;
1099
1100 /* Check if we can shorten the wait time. */
1101 delay_cnt = jiffies_to_usecs(time_remain);
1102 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1103 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1104 delay_cnt = (delay_cnt >> 3) + 1;
1105
1106 for (i = 0; i < delay_cnt; i++) {
Matt Carlson95e28692008-05-25 23:44:14 -07001107 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1108 break;
Matt Carlson4ba526c2008-08-15 14:10:04 -07001109 udelay(8);
Matt Carlson95e28692008-05-25 23:44:14 -07001110 }
1111}
1112
1113/* tp->lock is held. */
1114static void tg3_ump_link_report(struct tg3 *tp)
1115{
1116 u32 reg;
1117 u32 val;
1118
1119 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1120 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1121 return;
1122
1123 tg3_wait_for_event_ack(tp);
1124
1125 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1126
1127 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1128
1129 val = 0;
1130 if (!tg3_readphy(tp, MII_BMCR, &reg))
1131 val = reg << 16;
1132 if (!tg3_readphy(tp, MII_BMSR, &reg))
1133 val |= (reg & 0xffff);
1134 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1135
1136 val = 0;
1137 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1138 val = reg << 16;
1139 if (!tg3_readphy(tp, MII_LPA, &reg))
1140 val |= (reg & 0xffff);
1141 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1142
1143 val = 0;
1144 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1145 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1146 val = reg << 16;
1147 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1148 val |= (reg & 0xffff);
1149 }
1150 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1151
1152 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1153 val = reg << 16;
1154 else
1155 val = 0;
1156 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1157
Matt Carlson4ba526c2008-08-15 14:10:04 -07001158 tg3_generate_fw_event(tp);
Matt Carlson95e28692008-05-25 23:44:14 -07001159}
1160
1161static void tg3_link_report(struct tg3 *tp)
1162{
1163 if (!netif_carrier_ok(tp->dev)) {
1164 if (netif_msg_link(tp))
1165 printk(KERN_INFO PFX "%s: Link is down.\n",
1166 tp->dev->name);
1167 tg3_ump_link_report(tp);
1168 } else if (netif_msg_link(tp)) {
1169 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1170 tp->dev->name,
1171 (tp->link_config.active_speed == SPEED_1000 ?
1172 1000 :
1173 (tp->link_config.active_speed == SPEED_100 ?
1174 100 : 10)),
1175 (tp->link_config.active_duplex == DUPLEX_FULL ?
1176 "full" : "half"));
1177
1178 printk(KERN_INFO PFX
1179 "%s: Flow control is %s for TX and %s for RX.\n",
1180 tp->dev->name,
1181 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1182 "on" : "off",
1183 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1184 "on" : "off");
1185 tg3_ump_link_report(tp);
1186 }
1187}
1188
1189static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1190{
1191 u16 miireg;
1192
1193 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1194 miireg = ADVERTISE_PAUSE_CAP;
1195 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1196 miireg = ADVERTISE_PAUSE_ASYM;
1197 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1198 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1199 else
1200 miireg = 0;
1201
1202 return miireg;
1203}
1204
1205static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1206{
1207 u16 miireg;
1208
1209 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1210 miireg = ADVERTISE_1000XPAUSE;
1211 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1212 miireg = ADVERTISE_1000XPSE_ASYM;
1213 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1214 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1215 else
1216 miireg = 0;
1217
1218 return miireg;
1219}
1220
1221static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1222{
1223 u8 cap = 0;
1224
1225 if (lcladv & ADVERTISE_PAUSE_CAP) {
1226 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1227 if (rmtadv & LPA_PAUSE_CAP)
1228 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1229 else if (rmtadv & LPA_PAUSE_ASYM)
1230 cap = TG3_FLOW_CTRL_RX;
1231 } else {
1232 if (rmtadv & LPA_PAUSE_CAP)
1233 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1234 }
1235 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1236 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1237 cap = TG3_FLOW_CTRL_TX;
1238 }
1239
1240 return cap;
1241}
1242
1243static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1244{
1245 u8 cap = 0;
1246
1247 if (lcladv & ADVERTISE_1000XPAUSE) {
1248 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1249 if (rmtadv & LPA_1000XPAUSE)
1250 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1251 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1252 cap = TG3_FLOW_CTRL_RX;
1253 } else {
1254 if (rmtadv & LPA_1000XPAUSE)
1255 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1256 }
1257 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1258 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1259 cap = TG3_FLOW_CTRL_TX;
1260 }
1261
1262 return cap;
1263}
1264
Matt Carlsonf51f3562008-05-25 23:45:08 -07001265static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
Matt Carlson95e28692008-05-25 23:44:14 -07001266{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001267 u8 autoneg;
Matt Carlsonf51f3562008-05-25 23:45:08 -07001268 u8 flowctrl = 0;
Matt Carlson95e28692008-05-25 23:44:14 -07001269 u32 old_rx_mode = tp->rx_mode;
1270 u32 old_tx_mode = tp->tx_mode;
1271
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001272 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001273 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001274 else
1275 autoneg = tp->link_config.autoneg;
1276
1277 if (autoneg == AUTONEG_ENABLE &&
Matt Carlson95e28692008-05-25 23:44:14 -07001278 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1279 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Matt Carlsonf51f3562008-05-25 23:45:08 -07001280 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
Matt Carlson95e28692008-05-25 23:44:14 -07001281 else
Matt Carlsonf51f3562008-05-25 23:45:08 -07001282 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1283 } else
1284 flowctrl = tp->link_config.flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001285
Matt Carlsonf51f3562008-05-25 23:45:08 -07001286 tp->link_config.active_flowctrl = flowctrl;
Matt Carlson95e28692008-05-25 23:44:14 -07001287
Matt Carlsonf51f3562008-05-25 23:45:08 -07001288 if (flowctrl & TG3_FLOW_CTRL_RX)
Matt Carlson95e28692008-05-25 23:44:14 -07001289 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1290 else
1291 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1292
Matt Carlsonf51f3562008-05-25 23:45:08 -07001293 if (old_rx_mode != tp->rx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001294 tw32_f(MAC_RX_MODE, tp->rx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001295
Matt Carlsonf51f3562008-05-25 23:45:08 -07001296 if (flowctrl & TG3_FLOW_CTRL_TX)
Matt Carlson95e28692008-05-25 23:44:14 -07001297 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1298 else
1299 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1300
Matt Carlsonf51f3562008-05-25 23:45:08 -07001301 if (old_tx_mode != tp->tx_mode)
Matt Carlson95e28692008-05-25 23:44:14 -07001302 tw32_f(MAC_TX_MODE, tp->tx_mode);
Matt Carlson95e28692008-05-25 23:44:14 -07001303}
1304
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001305static void tg3_adjust_link(struct net_device *dev)
1306{
1307 u8 oldflowctrl, linkmesg = 0;
1308 u32 mac_mode, lcl_adv, rmt_adv;
1309 struct tg3 *tp = netdev_priv(dev);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001310 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001311
1312 spin_lock(&tp->lock);
1313
1314 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1315 MAC_MODE_HALF_DUPLEX);
1316
1317 oldflowctrl = tp->link_config.active_flowctrl;
1318
1319 if (phydev->link) {
1320 lcl_adv = 0;
1321 rmt_adv = 0;
1322
1323 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1324 mac_mode |= MAC_MODE_PORT_MODE_MII;
1325 else
1326 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1327
1328 if (phydev->duplex == DUPLEX_HALF)
1329 mac_mode |= MAC_MODE_HALF_DUPLEX;
1330 else {
1331 lcl_adv = tg3_advert_flowctrl_1000T(
1332 tp->link_config.flowctrl);
1333
1334 if (phydev->pause)
1335 rmt_adv = LPA_PAUSE_CAP;
1336 if (phydev->asym_pause)
1337 rmt_adv |= LPA_PAUSE_ASYM;
1338 }
1339
1340 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1341 } else
1342 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1343
1344 if (mac_mode != tp->mac_mode) {
1345 tp->mac_mode = mac_mode;
1346 tw32_f(MAC_MODE, tp->mac_mode);
1347 udelay(40);
1348 }
1349
Matt Carlsonfcb389d2008-11-03 16:55:44 -08001350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1351 if (phydev->speed == SPEED_10)
1352 tw32(MAC_MI_STAT,
1353 MAC_MI_STAT_10MBPS_MODE |
1354 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1355 else
1356 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1357 }
1358
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001359 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1360 tw32(MAC_TX_LENGTHS,
1361 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1362 (6 << TX_LENGTHS_IPG_SHIFT) |
1363 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1364 else
1365 tw32(MAC_TX_LENGTHS,
1366 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1367 (6 << TX_LENGTHS_IPG_SHIFT) |
1368 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1369
1370 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1371 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1372 phydev->speed != tp->link_config.active_speed ||
1373 phydev->duplex != tp->link_config.active_duplex ||
1374 oldflowctrl != tp->link_config.active_flowctrl)
1375 linkmesg = 1;
1376
1377 tp->link_config.active_speed = phydev->speed;
1378 tp->link_config.active_duplex = phydev->duplex;
1379
1380 spin_unlock(&tp->lock);
1381
1382 if (linkmesg)
1383 tg3_link_report(tp);
1384}
1385
1386static int tg3_phy_init(struct tg3 *tp)
1387{
1388 struct phy_device *phydev;
1389
1390 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1391 return 0;
1392
1393 /* Bring the PHY back to a known state. */
1394 tg3_bmcr_reset(tp);
1395
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001396 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001397
1398 /* Attach the MAC to the PHY. */
Kay Sieversfb28ad32008-11-10 13:55:14 -08001399 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
Matt Carlsona9daf362008-05-25 23:49:44 -07001400 phydev->dev_flags, phydev->interface);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001401 if (IS_ERR(phydev)) {
1402 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1403 return PTR_ERR(phydev);
1404 }
1405
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001406 /* Mask with MAC supported features. */
Matt Carlson9c61d6b2008-11-03 16:54:56 -08001407 switch (phydev->interface) {
1408 case PHY_INTERFACE_MODE_GMII:
1409 case PHY_INTERFACE_MODE_RGMII:
1410 phydev->supported &= (PHY_GBIT_FEATURES |
1411 SUPPORTED_Pause |
1412 SUPPORTED_Asym_Pause);
1413 break;
1414 case PHY_INTERFACE_MODE_MII:
1415 phydev->supported &= (PHY_BASIC_FEATURES |
1416 SUPPORTED_Pause |
1417 SUPPORTED_Asym_Pause);
1418 break;
1419 default:
1420 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1421 return -EINVAL;
1422 }
1423
1424 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001425
1426 phydev->advertising = phydev->supported;
1427
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001428 return 0;
1429}
1430
1431static void tg3_phy_start(struct tg3 *tp)
1432{
1433 struct phy_device *phydev;
1434
1435 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1436 return;
1437
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001438 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001439
1440 if (tp->link_config.phy_is_low_power) {
1441 tp->link_config.phy_is_low_power = 0;
1442 phydev->speed = tp->link_config.orig_speed;
1443 phydev->duplex = tp->link_config.orig_duplex;
1444 phydev->autoneg = tp->link_config.orig_autoneg;
1445 phydev->advertising = tp->link_config.orig_advertising;
1446 }
1447
1448 phy_start(phydev);
1449
1450 phy_start_aneg(phydev);
1451}
1452
1453static void tg3_phy_stop(struct tg3 *tp)
1454{
1455 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1456 return;
1457
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001458 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001459}
1460
1461static void tg3_phy_fini(struct tg3 *tp)
1462{
1463 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07001464 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07001465 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1466 }
1467}
1468
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001469static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1470{
1471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1472 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1473}
1474
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001475static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1476{
1477 u32 phy;
1478
1479 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1480 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1481 return;
1482
1483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1484 u32 ephy;
1485
1486 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1487 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1488 ephy | MII_TG3_EPHY_SHADOW_EN);
1489 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1490 if (enable)
1491 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1492 else
1493 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1494 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1495 }
1496 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1497 }
1498 } else {
1499 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1500 MII_TG3_AUXCTL_SHDWSEL_MISC;
1501 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1502 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1503 if (enable)
1504 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1505 else
1506 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1507 phy |= MII_TG3_AUXCTL_MISC_WREN;
1508 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1509 }
1510 }
1511}
1512
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513static void tg3_phy_set_wirespeed(struct tg3 *tp)
1514{
1515 u32 val;
1516
1517 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1518 return;
1519
1520 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1521 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1522 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1523 (val | (1 << 15) | (1 << 4)));
1524}
1525
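/* Apply the PHY tuning values recorded in the chip's OTP (one time
 * programmable) memory, cached in tp->phy_otp.  The SM_DSP clock is
 * enabled for the duration of the DSP writes and disabled afterwards.
 */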
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001526static void tg3_phy_apply_otp(struct tg3 *tp)
1527{
1528 u32 otp, phy;
1529
1530 if (!tp->phy_otp)
1531 return;
1532
1533 otp = tp->phy_otp;
1534
1535 /* Enable SM_DSP clock and tx 6dB coding. */
1536 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1537 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1538 MII_TG3_AUXCTL_ACTL_TX_6DB;
1539 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1540
1541 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1542 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1543 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1544
1545 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1546 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1547 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1548
1549 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1550 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1551 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1552
1553 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1554 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1555
1556 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1557 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1558
1559 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1560 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1561 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1562
1563 /* Turn off SM_DSP clock. */
1564 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1565 MII_TG3_AUXCTL_ACTL_TX_6DB;
1566 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1567}
1568
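/* Poll PHY register 0x16 until the DSP macro busy bit (0x1000)
 * clears; returns -EBUSY if it does not clear within the loop budget.
 */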
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569static int tg3_wait_macro_done(struct tg3 *tp)
1570{
1571 int limit = 100;
1572
1573 while (limit--) {
1574 u32 tmp32;
1575
1576 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1577 if ((tmp32 & 0x1000) == 0)
1578 break;
1579 }
1580 }
 1581	if (limit < 0)
1582 return -EBUSY;
1583
1584 return 0;
1585}
1586
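/* Write a fixed test pattern into each of the four DSP channels and
 * read it back.  Macro timeouts request another PHY reset via
 * *resetp; timeouts and readback mismatches both return -EBUSY.
 */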
1587static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1588{
1589 static const u32 test_pat[4][6] = {
1590 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1591 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1592 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1593 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1594 };
1595 int chan;
1596
1597 for (chan = 0; chan < 4; chan++) {
1598 int i;
1599
1600 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1601 (chan * 0x2000) | 0x0200);
1602 tg3_writephy(tp, 0x16, 0x0002);
1603
1604 for (i = 0; i < 6; i++)
1605 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1606 test_pat[chan][i]);
1607
1608 tg3_writephy(tp, 0x16, 0x0202);
1609 if (tg3_wait_macro_done(tp)) {
1610 *resetp = 1;
1611 return -EBUSY;
1612 }
1613
1614 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1615 (chan * 0x2000) | 0x0200);
1616 tg3_writephy(tp, 0x16, 0x0082);
1617 if (tg3_wait_macro_done(tp)) {
1618 *resetp = 1;
1619 return -EBUSY;
1620 }
1621
1622 tg3_writephy(tp, 0x16, 0x0802);
1623 if (tg3_wait_macro_done(tp)) {
1624 *resetp = 1;
1625 return -EBUSY;
1626 }
1627
1628 for (i = 0; i < 6; i += 2) {
1629 u32 low, high;
1630
1631 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1632 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1633 tg3_wait_macro_done(tp)) {
1634 *resetp = 1;
1635 return -EBUSY;
1636 }
1637 low &= 0x7fff;
1638 high &= 0x000f;
1639 if (low != test_pat[chan][i] ||
1640 high != test_pat[chan][i+1]) {
1641 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1642 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1643 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1644
1645 return -EBUSY;
1646 }
1647 }
1648 }
1649
1650 return 0;
1651}
1652
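/* Clear the test pattern from all four DSP channels. */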
1653static int tg3_phy_reset_chanpat(struct tg3 *tp)
1654{
1655 int chan;
1656
1657 for (chan = 0; chan < 4; chan++) {
1658 int i;
1659
1660 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1661 (chan * 0x2000) | 0x0200);
1662 tg3_writephy(tp, 0x16, 0x0002);
1663 for (i = 0; i < 6; i++)
1664 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1665 tg3_writephy(tp, 0x16, 0x0202);
1666 if (tg3_wait_macro_done(tp))
1667 return -EBUSY;
1668 }
1669
1670 return 0;
1671}
1672
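/* PHY reset sequence for 5703/5704/5705: retry the BMCR reset until
 * the DSP test pattern can be written and verified, then restore the
 * transmitter, interrupt and master-mode settings that were changed.
 */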
1673static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1674{
1675 u32 reg32, phy9_orig;
1676 int retries, do_phy_reset, err;
1677
1678 retries = 10;
1679 do_phy_reset = 1;
1680 do {
1681 if (do_phy_reset) {
1682 err = tg3_bmcr_reset(tp);
1683 if (err)
1684 return err;
1685 do_phy_reset = 0;
1686 }
1687
1688 /* Disable transmitter and interrupt. */
1689 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1690 continue;
1691
1692 reg32 |= 0x3000;
1693 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1694
1695 /* Set full-duplex, 1000 mbps. */
1696 tg3_writephy(tp, MII_BMCR,
1697 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1698
1699 /* Set to master mode. */
1700 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1701 continue;
1702
1703 tg3_writephy(tp, MII_TG3_CTRL,
1704 (MII_TG3_CTRL_AS_MASTER |
1705 MII_TG3_CTRL_ENABLE_AS_MASTER));
1706
1707 /* Enable SM_DSP_CLOCK and 6dB. */
1708 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1709
1710 /* Block the PHY control access. */
1711 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1712 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1713
1714 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1715 if (!err)
1716 break;
1717 } while (--retries);
1718
1719 err = tg3_phy_reset_chanpat(tp);
1720 if (err)
1721 return err;
1722
1723 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1724 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1725
1726 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1727 tg3_writephy(tp, 0x16, 0x0000);
1728
1729 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1730 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1731 /* Set Extended packet length bit for jumbo frames */
1732 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1733 }
1734 else {
1735 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1736 }
1737
1738 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1739
1740 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1741 reg32 &= ~0x3000;
1742 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1743 } else if (!err)
1744 err = -EBUSY;
1745
1746 return err;
1747}
1748
1749/* This will reset the tigon3 PHY if there is no valid
1750 * link unless the FORCE argument is non-zero.
1751 */
1752static int tg3_phy_reset(struct tg3 *tp)
1753{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001754 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 u32 phy_status;
1756 int err;
1757
Michael Chan60189dd2006-12-17 17:08:07 -08001758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1759 u32 val;
1760
1761 val = tr32(GRC_MISC_CFG);
1762 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1763 udelay(40);
1764 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1766 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1767 if (err != 0)
1768 return -EBUSY;
1769
Michael Chanc8e1e822006-04-29 18:55:17 -07001770 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1771 netif_carrier_off(tp->dev);
1772 tg3_link_report(tp);
1773 }
1774
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1778 err = tg3_phy_reset_5703_4_5(tp);
1779 if (err)
1780 return err;
1781 goto out;
1782 }
1783
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001784 cpmuctrl = 0;
1785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1786 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1787 cpmuctrl = tr32(TG3_CPMU_CTRL);
1788 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1789 tw32(TG3_CPMU_CTRL,
1790 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1791 }
1792
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 err = tg3_bmcr_reset(tp);
1794 if (err)
1795 return err;
1796
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001797 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1798 u32 phy;
1799
1800 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1801 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1802
1803 tw32(TG3_CPMU_CTRL, cpmuctrl);
1804 }
1805
Matt Carlsonbcb37f62008-11-03 16:52:09 -08001806 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1807 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001808 u32 val;
1809
1810 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1811 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1812 CPMU_LSPD_1000MB_MACCLK_12_5) {
1813 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1814 udelay(40);
1815 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1816 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001817
1818 /* Disable GPHY autopowerdown. */
1819 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1820 MII_TG3_MISC_SHDW_WREN |
1821 MII_TG3_MISC_SHDW_APD_SEL |
1822 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001823 }
1824
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001825 tg3_phy_apply_otp(tp);
1826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827out:
1828 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1829 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1830 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1831 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1832 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1835 }
1836 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1837 tg3_writephy(tp, 0x1c, 0x8d68);
1838 tg3_writephy(tp, 0x1c, 0x8d68);
1839 }
1840 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1841 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1842 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1843 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1845 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1846 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1847 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1848 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1849 }
Michael Chanc424cb22006-04-29 18:56:34 -07001850 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1851 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1852 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001853 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1854 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1855 tg3_writephy(tp, MII_TG3_TEST1,
1856 MII_TG3_TEST1_TRIM_EN | 0x4);
1857 } else
1858 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001859 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1860 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 /* Set Extended packet length bit (bit 14) on all chips that */
1862 /* support jumbo frames */
1863 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1864 /* Cannot do read-modify-write on 5401 */
1865 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001866 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 u32 phy_reg;
1868
1869 /* Set bit 14 with read-modify-write to preserve other bits */
1870 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1871 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1872 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1873 }
1874
1875 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1876 * jumbo frames transmission.
1877 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001878 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 u32 phy_reg;
1880
1881 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1882 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1883 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1884 }
1885
Michael Chan715116a2006-09-27 16:09:25 -07001886 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001887 /* adjust output voltage */
1888 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001889 }
1890
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001891 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 tg3_phy_set_wirespeed(tp);
1893 return 0;
1894}
1895
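/* Drive the GPIOs that control auxiliary (Vaux) power.  On dual-port
 * devices (5704/5714) the peer port's WOL/ASF configuration is
 * checked as well before the GPIOs are changed.
 */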
1896static void tg3_frob_aux_power(struct tg3 *tp)
1897{
1898 struct tg3 *tp_peer = tp;
1899
Michael Chan9d26e212006-12-07 00:21:14 -08001900 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 return;
1902
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001903 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1904 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1905 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001907 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001908 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001909 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001910 tp_peer = tp;
1911 else
1912 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001913 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001916 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1917 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1918 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001921 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1922 (GRC_LCLCTRL_GPIO_OE0 |
1923 GRC_LCLCTRL_GPIO_OE1 |
1924 GRC_LCLCTRL_GPIO_OE2 |
1925 GRC_LCLCTRL_GPIO_OUTPUT0 |
1926 GRC_LCLCTRL_GPIO_OUTPUT1),
1927 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001928 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1929 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1930 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1931 GRC_LCLCTRL_GPIO_OE1 |
1932 GRC_LCLCTRL_GPIO_OE2 |
1933 GRC_LCLCTRL_GPIO_OUTPUT0 |
1934 GRC_LCLCTRL_GPIO_OUTPUT1 |
1935 tp->grc_local_ctrl;
1936 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1937
1938 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1939 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1940
1941 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1942 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 } else {
1944 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001945 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
1947 if (tp_peer != tp &&
1948 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1949 return;
1950
Michael Chandc56b7d2005-12-19 16:26:28 -08001951 /* Workaround to prevent overdrawing Amps. */
1952 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1953 ASIC_REV_5714) {
1954 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001955 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1956 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001957 }
1958
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 /* On 5753 and variants, GPIO2 cannot be used. */
1960 no_gpio2 = tp->nic_sram_data_cfg &
1961 NIC_SRAM_DATA_CFG_NO_GPIO2;
1962
Michael Chandc56b7d2005-12-19 16:26:28 -08001963 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 GRC_LCLCTRL_GPIO_OE1 |
1965 GRC_LCLCTRL_GPIO_OE2 |
1966 GRC_LCLCTRL_GPIO_OUTPUT1 |
1967 GRC_LCLCTRL_GPIO_OUTPUT2;
1968 if (no_gpio2) {
1969 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1970 GRC_LCLCTRL_GPIO_OUTPUT2);
1971 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001972 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1973 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
1975 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1976
Michael Chanb401e9e2005-12-19 16:27:04 -08001977 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1978 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
1980 if (!no_gpio2) {
1981 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001982 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1983 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 }
1985 }
1986 } else {
1987 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1988 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1989 if (tp_peer != tp &&
1990 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1991 return;
1992
Michael Chanb401e9e2005-12-19 16:27:04 -08001993 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1994 (GRC_LCLCTRL_GPIO_OE1 |
1995 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Michael Chanb401e9e2005-12-19 16:27:04 -08001997 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1998 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
Michael Chanb401e9e2005-12-19 16:27:04 -08002000 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2001 (GRC_LCLCTRL_GPIO_OE1 |
2002 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 }
2004 }
2005}
2006
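/* Decide whether the 5700's MAC_MODE_LINK_POLARITY bit should be set
 * for the given speed; the answer depends on the LED mode and on
 * whether a 5411 PHY is fitted.
 */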
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002007static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2008{
2009 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2010 return 1;
2011 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2012 if (speed != SPEED_10)
2013 return 1;
2014 } else if (speed == SPEED_10)
2015 return 1;
2016
2017 return 0;
2018}
2019
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020static int tg3_setup_phy(struct tg3 *, int);
2021
2022#define RESET_KIND_SHUTDOWN 0
2023#define RESET_KIND_INIT 1
2024#define RESET_KIND_SUSPEND 2
2025
2026static void tg3_write_sig_post_reset(struct tg3 *, int);
2027static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08002028static int tg3_nvram_lock(struct tg3 *);
2029static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
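/* Power down the PHY (or serdes block) ahead of a low-power
 * transition.  Several chips are left powered because of hardware
 * bugs, and the 5784/5761 AX steppings need their 1000Mb MAC clock
 * selection adjusted first.
 */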
Matt Carlson0a459aa2008-11-03 16:54:15 -08002031static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
Michael Chan15c3b692006-03-22 01:06:52 -08002032{
Matt Carlsonce057f02007-11-12 21:08:03 -08002033 u32 val;
2034
Michael Chan51297242007-02-13 12:17:57 -08002035 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2037 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2038 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2039
2040 sg_dig_ctrl |=
2041 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2042 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2043 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2044 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002045 return;
Michael Chan51297242007-02-13 12:17:57 -08002046 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002047
Michael Chan60189dd2006-12-17 17:08:07 -08002048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08002049 tg3_bmcr_reset(tp);
2050 val = tr32(GRC_MISC_CFG);
2051 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2052 udelay(40);
2053 return;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002054 } else if (do_low_power) {
Michael Chan715116a2006-09-27 16:09:25 -07002055 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2056 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002057
2058 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2059 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2060 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2061 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2062 MII_TG3_AUXCTL_PCTL_VREG_11V);
Michael Chan715116a2006-09-27 16:09:25 -07002063 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002064
Michael Chan15c3b692006-03-22 01:06:52 -08002065 /* The PHY should not be powered down on some chips because
2066 * of bugs.
2067 */
2068 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2069 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2070 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2071 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2072 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002073
Matt Carlsonbcb37f62008-11-03 16:52:09 -08002074 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2075 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002076 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2077 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2078 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2079 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2080 }
2081
Michael Chan15c3b692006-03-22 01:06:52 -08002082 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2083}
2084
Matt Carlson3f007892008-11-03 16:51:36 -08002085/* tp->lock is held. */
2086static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2087{
2088 u32 addr_high, addr_low;
2089 int i;
2090
2091 addr_high = ((tp->dev->dev_addr[0] << 8) |
2092 tp->dev->dev_addr[1]);
2093 addr_low = ((tp->dev->dev_addr[2] << 24) |
2094 (tp->dev->dev_addr[3] << 16) |
2095 (tp->dev->dev_addr[4] << 8) |
2096 (tp->dev->dev_addr[5] << 0));
2097 for (i = 0; i < 4; i++) {
2098 if (i == 1 && skip_mac_1)
2099 continue;
2100 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2101 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2102 }
2103
2104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2106 for (i = 0; i < 12; i++) {
2107 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2108 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2109 }
2110 }
2111
2112 addr_high = (tp->dev->dev_addr[0] +
2113 tp->dev->dev_addr[1] +
2114 tp->dev->dev_addr[2] +
2115 tp->dev->dev_addr[3] +
2116 tp->dev->dev_addr[4] +
2117 tp->dev->dev_addr[5]) &
2118 TX_BACKOFF_SEED_MASK;
2119 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2120}
2121
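/* Move the device into the requested PCI power state.  D0 simply
 * restores main (Vmain) power.  For the low-power states the current
 * link settings are saved, a WOL-friendly subset is re-advertised,
 * the magic-packet and aux-power logic is armed, and the PHY and
 * clocks are powered down where that is safe.
 */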
Michael Chanbc1c7562006-03-20 17:48:03 -08002122static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123{
2124 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002125 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
2127 /* Make sure register accesses (indirect or otherwise)
2128 * will function correctly.
2129 */
2130 pci_write_config_dword(tp->pdev,
2131 TG3PCI_MISC_HOST_CTRL,
2132 tp->misc_host_ctrl);
2133
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002135 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002136 pci_enable_wake(tp->pdev, state, false);
2137 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002138
Michael Chan9d26e212006-12-07 00:21:14 -08002139 /* Switch out of Vaux if it is a NIC */
2140 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002141 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
2143 return 0;
2144
Michael Chanbc1c7562006-03-20 17:48:03 -08002145 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002146 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002147 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 break;
2149
2150 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002151 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2152 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2156 tw32(TG3PCI_MISC_HOST_CTRL,
2157 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2158
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002159 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2160 device_may_wakeup(&tp->pdev->dev) &&
2161 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2162
Matt Carlsondd477002008-05-25 23:45:58 -07002163 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002164 do_low_power = false;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002165 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2166 !tp->link_config.phy_is_low_power) {
2167 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002168 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002169
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002170 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002171
2172 tp->link_config.phy_is_low_power = 1;
2173
2174 tp->link_config.orig_speed = phydev->speed;
2175 tp->link_config.orig_duplex = phydev->duplex;
2176 tp->link_config.orig_autoneg = phydev->autoneg;
2177 tp->link_config.orig_advertising = phydev->advertising;
2178
2179 advertising = ADVERTISED_TP |
2180 ADVERTISED_Pause |
2181 ADVERTISED_Autoneg |
2182 ADVERTISED_10baseT_Half;
2183
2184 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002185 device_should_wake) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002186 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2187 advertising |=
2188 ADVERTISED_100baseT_Half |
2189 ADVERTISED_100baseT_Full |
2190 ADVERTISED_10baseT_Full;
2191 else
2192 advertising |= ADVERTISED_10baseT_Full;
2193 }
2194
2195 phydev->advertising = advertising;
2196
2197 phy_start_aneg(phydev);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002198
2199 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2200 if (phyid != TG3_PHY_ID_BCMAC131) {
2201 phyid &= TG3_PHY_OUI_MASK;
2203 2202				if (phyid == TG3_PHY_OUI_1 ||
 2203				    phyid == TG3_PHY_OUI_2 ||
2204 phyid == TG3_PHY_OUI_3)
2205 do_low_power = true;
2206 }
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002207 }
Matt Carlsondd477002008-05-25 23:45:58 -07002208 } else {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002209 do_low_power = false;
2210
Matt Carlsondd477002008-05-25 23:45:58 -07002211 if (tp->link_config.phy_is_low_power == 0) {
2212 tp->link_config.phy_is_low_power = 1;
2213 tp->link_config.orig_speed = tp->link_config.speed;
2214 tp->link_config.orig_duplex = tp->link_config.duplex;
2215 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2216 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217
Matt Carlsondd477002008-05-25 23:45:58 -07002218 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2219 tp->link_config.speed = SPEED_10;
2220 tp->link_config.duplex = DUPLEX_HALF;
2221 tp->link_config.autoneg = AUTONEG_ENABLE;
2222 tg3_setup_phy(tp, 0);
2223 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 }
2225
Matt Carlson3f007892008-11-03 16:51:36 -08002226 __tg3_set_mac_addr(tp, 0);
2227
Michael Chanb5d37722006-09-27 16:06:21 -07002228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2229 u32 val;
2230
2231 val = tr32(GRC_VCPU_EXT_CTRL);
2232 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2233 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002234 int i;
2235 u32 val;
2236
2237 for (i = 0; i < 200; i++) {
2238 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2239 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2240 break;
2241 msleep(1);
2242 }
2243 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002244 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2245 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2246 WOL_DRV_STATE_SHUTDOWN |
2247 WOL_DRV_WOL |
2248 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002249
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002250 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 u32 mac_mode;
2252
2253 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002254 if (do_low_power) {
Matt Carlsondd477002008-05-25 23:45:58 -07002255 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2256 udelay(40);
2257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Michael Chan3f7045c2006-09-27 16:02:29 -07002259 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2260 mac_mode = MAC_MODE_PORT_MODE_GMII;
2261 else
2262 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002264 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2265 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2266 ASIC_REV_5700) {
2267 u32 speed = (tp->tg3_flags &
2268 TG3_FLAG_WOL_SPEED_100MB) ?
2269 SPEED_100 : SPEED_10;
2270 if (tg3_5700_link_polarity(tp, speed))
2271 mac_mode |= MAC_MODE_LINK_POLARITY;
2272 else
2273 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 } else {
2276 mac_mode = MAC_MODE_PORT_MODE_TBI;
2277 }
2278
John W. Linvillecbf46852005-04-21 17:01:29 -07002279 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 tw32(MAC_LED_CTRL, tp->led_ctrl);
2281
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002282 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2283 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2284 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2285 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2286 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2287 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288
Matt Carlson3bda1252008-08-15 14:08:22 -07002289 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2290 mac_mode |= tp->mac_mode &
2291 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2292 if (mac_mode & MAC_MODE_APE_TX_EN)
2293 mac_mode |= MAC_MODE_TDE_ENABLE;
2294 }
2295
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 tw32_f(MAC_MODE, mac_mode);
2297 udelay(100);
2298
2299 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2300 udelay(10);
2301 }
2302
2303 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2306 u32 base_val;
2307
2308 base_val = tp->pci_clock_ctrl;
2309 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2310 CLOCK_CTRL_TXCLK_DISABLE);
2311
Michael Chanb401e9e2005-12-19 16:27:04 -08002312 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2313 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002314 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002315 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002316 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002317 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002318 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2320 u32 newbits1, newbits2;
2321
2322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2324 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2325 CLOCK_CTRL_TXCLK_DISABLE |
2326 CLOCK_CTRL_ALTCLK);
2327 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2328 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2329 newbits1 = CLOCK_CTRL_625_CORE;
2330 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2331 } else {
2332 newbits1 = CLOCK_CTRL_ALTCLK;
2333 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2334 }
2335
Michael Chanb401e9e2005-12-19 16:27:04 -08002336 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2337 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
Michael Chanb401e9e2005-12-19 16:27:04 -08002339 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2340 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341
2342 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2343 u32 newbits3;
2344
2345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2347 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2348 CLOCK_CTRL_TXCLK_DISABLE |
2349 CLOCK_CTRL_44MHZ_CORE);
2350 } else {
2351 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2352 }
2353
Michael Chanb401e9e2005-12-19 16:27:04 -08002354 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2355 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 }
2357 }
2358
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002359 if (!(device_should_wake) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002360 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2361 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson0a459aa2008-11-03 16:54:15 -08002362 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08002363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 tg3_frob_aux_power(tp);
2365
2366 /* Workaround for unstable PLL clock */
2367 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2368 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2369 u32 val = tr32(0x7d00);
2370
2371 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2372 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002373 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002374 int err;
2375
2376 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002378 if (!err)
2379 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002380 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 }
2382
Michael Chanbbadf502006-04-06 21:46:34 -07002383 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2384
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002385 if (device_should_wake)
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002386 pci_enable_wake(tp->pdev, state, true);
2387
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002389 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 return 0;
2392}
2393
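/* Translate the PHY AUX status register's speed/duplex encoding into
 * SPEED_* and DUPLEX_* values.  The 5906 uses a different encoding
 * and is handled in the default case.
 */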
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2395{
2396 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2397 case MII_TG3_AUX_STAT_10HALF:
2398 *speed = SPEED_10;
2399 *duplex = DUPLEX_HALF;
2400 break;
2401
2402 case MII_TG3_AUX_STAT_10FULL:
2403 *speed = SPEED_10;
2404 *duplex = DUPLEX_FULL;
2405 break;
2406
2407 case MII_TG3_AUX_STAT_100HALF:
2408 *speed = SPEED_100;
2409 *duplex = DUPLEX_HALF;
2410 break;
2411
2412 case MII_TG3_AUX_STAT_100FULL:
2413 *speed = SPEED_100;
2414 *duplex = DUPLEX_FULL;
2415 break;
2416
2417 case MII_TG3_AUX_STAT_1000HALF:
2418 *speed = SPEED_1000;
2419 *duplex = DUPLEX_HALF;
2420 break;
2421
2422 case MII_TG3_AUX_STAT_1000FULL:
2423 *speed = SPEED_1000;
2424 *duplex = DUPLEX_FULL;
2425 break;
2426
2427 default:
Michael Chan715116a2006-09-27 16:09:25 -07002428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2429 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2430 SPEED_10;
2431 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2432 DUPLEX_HALF;
2433 break;
2434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 *speed = SPEED_INVALID;
2436 *duplex = DUPLEX_INVALID;
2437 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439}
2440
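/* Program the copper PHY advertisement registers and restart
 * autonegotiation, or force the configured speed/duplex when
 * autonegotiation is disabled.  In low-power mode only 10baseT (and
 * optionally 100baseT) is advertised.
 */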
2441static void tg3_phy_copper_begin(struct tg3 *tp)
2442{
2443 u32 new_adv;
2444 int i;
2445
2446 if (tp->link_config.phy_is_low_power) {
2447 /* Entering low power mode. Disable gigabit and
2448 * 100baseT advertisements.
2449 */
2450 tg3_writephy(tp, MII_TG3_CTRL, 0);
2451
2452 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2453 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2454 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2455 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2456
2457 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2458 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2460 tp->link_config.advertising &=
2461 ~(ADVERTISED_1000baseT_Half |
2462 ADVERTISED_1000baseT_Full);
2463
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002464 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2466 new_adv |= ADVERTISE_10HALF;
2467 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2468 new_adv |= ADVERTISE_10FULL;
2469 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2470 new_adv |= ADVERTISE_100HALF;
2471 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2472 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002473
2474 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2475
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2477
2478 if (tp->link_config.advertising &
2479 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2480 new_adv = 0;
2481 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2482 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2483 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2484 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2485 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2486 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2487 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2488 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2489 MII_TG3_CTRL_ENABLE_AS_MASTER);
2490 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2491 } else {
2492 tg3_writephy(tp, MII_TG3_CTRL, 0);
2493 }
2494 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002495 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2496 new_adv |= ADVERTISE_CSMA;
2497
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 /* Asking for a specific link mode. */
2499 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2501
2502 if (tp->link_config.duplex == DUPLEX_FULL)
2503 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2504 else
2505 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2506 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2507 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2508 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2509 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 if (tp->link_config.speed == SPEED_100) {
2512 if (tp->link_config.duplex == DUPLEX_FULL)
2513 new_adv |= ADVERTISE_100FULL;
2514 else
2515 new_adv |= ADVERTISE_100HALF;
2516 } else {
2517 if (tp->link_config.duplex == DUPLEX_FULL)
2518 new_adv |= ADVERTISE_10FULL;
2519 else
2520 new_adv |= ADVERTISE_10HALF;
2521 }
2522 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002523
2524 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002526
2527 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 }
2529
2530 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2531 tp->link_config.speed != SPEED_INVALID) {
2532 u32 bmcr, orig_bmcr;
2533
2534 tp->link_config.active_speed = tp->link_config.speed;
2535 tp->link_config.active_duplex = tp->link_config.duplex;
2536
2537 bmcr = 0;
2538 switch (tp->link_config.speed) {
2539 default:
2540 case SPEED_10:
2541 break;
2542
2543 case SPEED_100:
2544 bmcr |= BMCR_SPEED100;
2545 break;
2546
2547 case SPEED_1000:
2548 bmcr |= TG3_BMCR_SPEED1000;
2549 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551
2552 if (tp->link_config.duplex == DUPLEX_FULL)
2553 bmcr |= BMCR_FULLDPLX;
2554
2555 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2556 (bmcr != orig_bmcr)) {
2557 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2558 for (i = 0; i < 1500; i++) {
2559 u32 tmp;
2560
2561 udelay(10);
2562 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2563 tg3_readphy(tp, MII_BMSR, &tmp))
2564 continue;
2565 if (!(tmp & BMSR_LSTATUS)) {
2566 udelay(40);
2567 break;
2568 }
2569 }
2570 tg3_writephy(tp, MII_BMCR, bmcr);
2571 udelay(40);
2572 }
2573 } else {
2574 tg3_writephy(tp, MII_BMCR,
2575 BMCR_ANENABLE | BMCR_ANRESTART);
2576 }
2577}
2578
2579static int tg3_init_5401phy_dsp(struct tg3 *tp)
2580{
2581 int err;
2582
2583 /* Turn off tap power management. */
2584 /* Set Extended packet length bit */
2585 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2586
2587 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2588 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2589
2590 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2591 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2592
2593 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2594 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2595
2596 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2597 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2598
2599 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2600 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2601
2602 udelay(40);
2603
2604 return err;
2605}
2606
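/* Return 1 only if every mode in @mask is already present in the PHY
 * advertisement registers (MII_ADVERTISE and, on gigabit capable
 * devices, MII_TG3_CTRL).
 */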
Michael Chan3600d912006-12-07 00:21:48 -08002607static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608{
Michael Chan3600d912006-12-07 00:21:48 -08002609 u32 adv_reg, all_mask = 0;
2610
2611 if (mask & ADVERTISED_10baseT_Half)
2612 all_mask |= ADVERTISE_10HALF;
2613 if (mask & ADVERTISED_10baseT_Full)
2614 all_mask |= ADVERTISE_10FULL;
2615 if (mask & ADVERTISED_100baseT_Half)
2616 all_mask |= ADVERTISE_100HALF;
2617 if (mask & ADVERTISED_100baseT_Full)
2618 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619
2620 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2621 return 0;
2622
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 if ((adv_reg & all_mask) != all_mask)
2624 return 0;
2625 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2626 u32 tg3_ctrl;
2627
Michael Chan3600d912006-12-07 00:21:48 -08002628 all_mask = 0;
2629 if (mask & ADVERTISED_1000baseT_Half)
2630 all_mask |= ADVERTISE_1000HALF;
2631 if (mask & ADVERTISED_1000baseT_Full)
2632 all_mask |= ADVERTISE_1000FULL;
2633
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2635 return 0;
2636
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 if ((tg3_ctrl & all_mask) != all_mask)
2638 return 0;
2639 }
2640 return 1;
2641}
2642
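/* Check that the pause bits currently advertised match the requested
 * flow control settings.  Returns 0 only when the link is full duplex
 * and was negotiated with different pause bits, in which case the
 * caller treats the link as not up yet.
 */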
Matt Carlsonef167e22007-12-20 20:10:01 -08002643static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2644{
2645 u32 curadv, reqadv;
2646
2647 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2648 return 1;
2649
2650 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2651 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2652
2653 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2654 if (curadv != reqadv)
2655 return 0;
2656
2657 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2658 tg3_readphy(tp, MII_LPA, rmtadv);
2659 } else {
2660 /* Reprogram the advertisement register, even if it
2661 * does not affect the current link. If the link
2662 * gets renegotiated in the future, we can save an
2663 * additional renegotiation cycle by advertising
2664 * it correctly in the first place.
2665 */
2666 if (curadv != reqadv) {
2667 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2668 ADVERTISE_PAUSE_ASYM);
2669 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2670 }
2671 }
2672
2673 return 1;
2674}
2675
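/* Establish or re-check the link on a copper PHY: clear stale status,
 * apply per-chip PHY workarounds, wait for BMSR link, derive speed
 * and duplex from the AUX status register, set up flow control and
 * finally program MAC_MODE to match the negotiated link.
 */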
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2677{
2678 int current_link_up;
2679 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002680 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 u16 current_speed;
2682 u8 current_duplex;
2683 int i, err;
2684
2685 tw32(MAC_EVENT, 0);
2686
2687 tw32_f(MAC_STATUS,
2688 (MAC_STATUS_SYNC_CHANGED |
2689 MAC_STATUS_CFG_CHANGED |
2690 MAC_STATUS_MI_COMPLETION |
2691 MAC_STATUS_LNKSTATE_CHANGED));
2692 udelay(40);
2693
Matt Carlson8ef21422008-05-02 16:47:53 -07002694 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2695 tw32_f(MAC_MI_MODE,
2696 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2697 udelay(80);
2698 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2701
2702 /* Some third-party PHYs need to be reset on link going
2703 * down.
2704 */
2705 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2707 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2708 netif_carrier_ok(tp->dev)) {
2709 tg3_readphy(tp, MII_BMSR, &bmsr);
2710 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2711 !(bmsr & BMSR_LSTATUS))
2712 force_reset = 1;
2713 }
2714 if (force_reset)
2715 tg3_phy_reset(tp);
2716
2717 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2718 tg3_readphy(tp, MII_BMSR, &bmsr);
2719 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2720 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2721 bmsr = 0;
2722
2723 if (!(bmsr & BMSR_LSTATUS)) {
2724 err = tg3_init_5401phy_dsp(tp);
2725 if (err)
2726 return err;
2727
2728 tg3_readphy(tp, MII_BMSR, &bmsr);
2729 for (i = 0; i < 1000; i++) {
2730 udelay(10);
2731 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2732 (bmsr & BMSR_LSTATUS)) {
2733 udelay(40);
2734 break;
2735 }
2736 }
2737
2738 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2739 !(bmsr & BMSR_LSTATUS) &&
2740 tp->link_config.active_speed == SPEED_1000) {
2741 err = tg3_phy_reset(tp);
2742 if (!err)
2743 err = tg3_init_5401phy_dsp(tp);
2744 if (err)
2745 return err;
2746 }
2747 }
2748 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2749 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2750 /* 5701 {A0,B0} CRC bug workaround */
2751 tg3_writephy(tp, 0x15, 0x0a75);
2752 tg3_writephy(tp, 0x1c, 0x8c68);
2753 tg3_writephy(tp, 0x1c, 0x8d68);
2754 tg3_writephy(tp, 0x1c, 0x8c68);
2755 }
2756
2757 /* Clear pending interrupts... */
2758 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2759 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2760
2761 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2762 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002763 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2765
2766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2768 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2769 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2770 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2771 else
2772 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2773 }
2774
2775 current_link_up = 0;
2776 current_speed = SPEED_INVALID;
2777 current_duplex = DUPLEX_INVALID;
2778
2779 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2780 u32 val;
2781
2782 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2783 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2784 if (!(val & (1 << 10))) {
2785 val |= (1 << 10);
2786 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2787 goto relink;
2788 }
2789 }
2790
2791 bmsr = 0;
2792 for (i = 0; i < 100; i++) {
2793 tg3_readphy(tp, MII_BMSR, &bmsr);
2794 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2795 (bmsr & BMSR_LSTATUS))
2796 break;
2797 udelay(40);
2798 }
2799
2800 if (bmsr & BMSR_LSTATUS) {
2801 u32 aux_stat, bmcr;
2802
2803 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2804 for (i = 0; i < 2000; i++) {
2805 udelay(10);
2806 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2807 aux_stat)
2808 break;
2809 }
2810
2811 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2812 &current_speed,
2813 &current_duplex);
2814
2815 bmcr = 0;
2816 for (i = 0; i < 200; i++) {
2817 tg3_readphy(tp, MII_BMCR, &bmcr);
2818 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2819 continue;
2820 if (bmcr && bmcr != 0x7fff)
2821 break;
2822 udelay(10);
2823 }
2824
Matt Carlsonef167e22007-12-20 20:10:01 -08002825 lcl_adv = 0;
2826 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827
Matt Carlsonef167e22007-12-20 20:10:01 -08002828 tp->link_config.active_speed = current_speed;
2829 tp->link_config.active_duplex = current_duplex;
2830
2831 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2832 if ((bmcr & BMCR_ANENABLE) &&
2833 tg3_copper_is_advertising_all(tp,
2834 tp->link_config.advertising)) {
2835 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2836 &rmt_adv))
2837 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 }
2839 } else {
2840 if (!(bmcr & BMCR_ANENABLE) &&
2841 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002842 tp->link_config.duplex == current_duplex &&
2843 tp->link_config.flowctrl ==
2844 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 }
2847 }
2848
Matt Carlsonef167e22007-12-20 20:10:01 -08002849 if (current_link_up == 1 &&
2850 tp->link_config.active_duplex == DUPLEX_FULL)
2851 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
2853
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854relink:
Michael Chan6921d202005-12-13 21:15:53 -08002855 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 u32 tmp;
2857
2858 tg3_phy_copper_begin(tp);
2859
2860 tg3_readphy(tp, MII_BMSR, &tmp);
2861 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2862 (tmp & BMSR_LSTATUS))
2863 current_link_up = 1;
2864 }
2865
2866 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2867 if (current_link_up == 1) {
2868 if (tp->link_config.active_speed == SPEED_100 ||
2869 tp->link_config.active_speed == SPEED_10)
2870 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2871 else
2872 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2873 } else
2874 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2875
2876 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2877 if (tp->link_config.active_duplex == DUPLEX_HALF)
2878 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2879
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002881 if (current_link_up == 1 &&
2882 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002884 else
2885 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 }
2887
2888 /* ??? Without this setting Netgear GA302T PHY does not
2889 * ??? send/receive packets...
2890 */
2891 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2892 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2893 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2894 tw32_f(MAC_MI_MODE, tp->mi_mode);
2895 udelay(80);
2896 }
2897
2898 tw32_f(MAC_MODE, tp->mac_mode);
2899 udelay(40);
2900
2901 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2902 /* Polled via timer. */
2903 tw32_f(MAC_EVENT, 0);
2904 } else {
2905 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2906 }
2907 udelay(40);
2908
2909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2910 current_link_up == 1 &&
2911 tp->link_config.active_speed == SPEED_1000 &&
2912 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2913 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2914 udelay(120);
2915 tw32_f(MAC_STATUS,
2916 (MAC_STATUS_SYNC_CHANGED |
2917 MAC_STATUS_CFG_CHANGED));
2918 udelay(40);
2919 tg3_write_mem(tp,
2920 NIC_SRAM_FIRMWARE_MBOX,
2921 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2922 }
2923
2924 if (current_link_up != netif_carrier_ok(tp->dev)) {
2925 if (current_link_up)
2926 netif_carrier_on(tp->dev);
2927 else
2928 netif_carrier_off(tp->dev);
2929 tg3_link_report(tp);
2930 }
2931
2932 return 0;
2933}
2934
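/* Per-device state for the autonegotiation state machine the driver
 * runs by hand on fiber (SERDES) links.
 */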
2935struct tg3_fiber_aneginfo {
2936 int state;
2937#define ANEG_STATE_UNKNOWN 0
2938#define ANEG_STATE_AN_ENABLE 1
2939#define ANEG_STATE_RESTART_INIT 2
2940#define ANEG_STATE_RESTART 3
2941#define ANEG_STATE_DISABLE_LINK_OK 4
2942#define ANEG_STATE_ABILITY_DETECT_INIT 5
2943#define ANEG_STATE_ABILITY_DETECT 6
2944#define ANEG_STATE_ACK_DETECT_INIT 7
2945#define ANEG_STATE_ACK_DETECT 8
2946#define ANEG_STATE_COMPLETE_ACK_INIT 9
2947#define ANEG_STATE_COMPLETE_ACK 10
2948#define ANEG_STATE_IDLE_DETECT_INIT 11
2949#define ANEG_STATE_IDLE_DETECT 12
2950#define ANEG_STATE_LINK_OK 13
2951#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2952#define ANEG_STATE_NEXT_PAGE_WAIT 15
2953
2954 u32 flags;
2955#define MR_AN_ENABLE 0x00000001
2956#define MR_RESTART_AN 0x00000002
2957#define MR_AN_COMPLETE 0x00000004
2958#define MR_PAGE_RX 0x00000008
2959#define MR_NP_LOADED 0x00000010
2960#define MR_TOGGLE_TX 0x00000020
2961#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2962#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2963#define MR_LP_ADV_SYM_PAUSE 0x00000100
2964#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2965#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2966#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2967#define MR_LP_ADV_NEXT_PAGE 0x00001000
2968#define MR_TOGGLE_RX 0x00002000
2969#define MR_NP_RX 0x00004000
2970
2971#define MR_LINK_OK 0x80000000
2972
2973 unsigned long link_time, cur_time;
2974
2975 u32 ability_match_cfg;
2976 int ability_match_count;
2977
2978 char ability_match, idle_match, ack_match;
2979
2980 u32 txconfig, rxconfig;
2981#define ANEG_CFG_NP 0x00000080
2982#define ANEG_CFG_ACK 0x00000040
2983#define ANEG_CFG_RF2 0x00000020
2984#define ANEG_CFG_RF1 0x00000010
2985#define ANEG_CFG_PS2 0x00000001
2986#define ANEG_CFG_PS1 0x00008000
2987#define ANEG_CFG_HD 0x00004000
2988#define ANEG_CFG_FD 0x00002000
2989#define ANEG_CFG_INVAL 0x00001f06
2990
2991};
2992#define ANEG_OK 0
2993#define ANEG_DONE 1
2994#define ANEG_TIMER_ENAB 2
2995#define ANEG_FAILED -1
2996
2997#define ANEG_STATE_SETTLE_TIME 10000
2998
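/* Advance the fiber autoneg state machine one step.  Returns ANEG_OK,
 * ANEG_DONE, ANEG_FAILED, or ANEG_TIMER_ENAB when the caller should
 * keep ticking it.
 */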
2999static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3000 struct tg3_fiber_aneginfo *ap)
3001{
Matt Carlson5be73b42007-12-20 20:09:29 -08003002 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 unsigned long delta;
3004 u32 rx_cfg_reg;
3005 int ret;
3006
3007 if (ap->state == ANEG_STATE_UNKNOWN) {
3008 ap->rxconfig = 0;
3009 ap->link_time = 0;
3010 ap->cur_time = 0;
3011 ap->ability_match_cfg = 0;
3012 ap->ability_match_count = 0;
3013 ap->ability_match = 0;
3014 ap->idle_match = 0;
3015 ap->ack_match = 0;
3016 }
3017 ap->cur_time++;
3018
3019 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3020 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3021
3022 if (rx_cfg_reg != ap->ability_match_cfg) {
3023 ap->ability_match_cfg = rx_cfg_reg;
3024 ap->ability_match = 0;
3025 ap->ability_match_count = 0;
3026 } else {
3027 if (++ap->ability_match_count > 1) {
3028 ap->ability_match = 1;
3029 ap->ability_match_cfg = rx_cfg_reg;
3030 }
3031 }
3032 if (rx_cfg_reg & ANEG_CFG_ACK)
3033 ap->ack_match = 1;
3034 else
3035 ap->ack_match = 0;
3036
3037 ap->idle_match = 0;
3038 } else {
3039 ap->idle_match = 1;
3040 ap->ability_match_cfg = 0;
3041 ap->ability_match_count = 0;
3042 ap->ability_match = 0;
3043 ap->ack_match = 0;
3044
3045 rx_cfg_reg = 0;
3046 }
3047
3048 ap->rxconfig = rx_cfg_reg;
3049 ret = ANEG_OK;
3050
3051 switch(ap->state) {
3052 case ANEG_STATE_UNKNOWN:
3053 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3054 ap->state = ANEG_STATE_AN_ENABLE;
3055
3056 /* fallthru */
3057 case ANEG_STATE_AN_ENABLE:
3058 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3059 if (ap->flags & MR_AN_ENABLE) {
3060 ap->link_time = 0;
3061 ap->cur_time = 0;
3062 ap->ability_match_cfg = 0;
3063 ap->ability_match_count = 0;
3064 ap->ability_match = 0;
3065 ap->idle_match = 0;
3066 ap->ack_match = 0;
3067
3068 ap->state = ANEG_STATE_RESTART_INIT;
3069 } else {
3070 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3071 }
3072 break;
3073
3074 case ANEG_STATE_RESTART_INIT:
3075 ap->link_time = ap->cur_time;
3076 ap->flags &= ~(MR_NP_LOADED);
3077 ap->txconfig = 0;
3078 tw32(MAC_TX_AUTO_NEG, 0);
3079 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3080 tw32_f(MAC_MODE, tp->mac_mode);
3081 udelay(40);
3082
3083 ret = ANEG_TIMER_ENAB;
3084 ap->state = ANEG_STATE_RESTART;
3085
3086 /* fallthru */
3087 case ANEG_STATE_RESTART:
3088 delta = ap->cur_time - ap->link_time;
3089 if (delta > ANEG_STATE_SETTLE_TIME) {
3090 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3091 } else {
3092 ret = ANEG_TIMER_ENAB;
3093 }
3094 break;
3095
3096 case ANEG_STATE_DISABLE_LINK_OK:
3097 ret = ANEG_DONE;
3098 break;
3099
3100 case ANEG_STATE_ABILITY_DETECT_INIT:
3101 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003102 ap->txconfig = ANEG_CFG_FD;
3103 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3104 if (flowctrl & ADVERTISE_1000XPAUSE)
3105 ap->txconfig |= ANEG_CFG_PS1;
3106 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3107 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3109 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3110 tw32_f(MAC_MODE, tp->mac_mode);
3111 udelay(40);
3112
3113 ap->state = ANEG_STATE_ABILITY_DETECT;
3114 break;
3115
3116 case ANEG_STATE_ABILITY_DETECT:
3117 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3118 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3119 }
3120 break;
3121
3122 case ANEG_STATE_ACK_DETECT_INIT:
3123 ap->txconfig |= ANEG_CFG_ACK;
3124 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3125 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3126 tw32_f(MAC_MODE, tp->mac_mode);
3127 udelay(40);
3128
3129 ap->state = ANEG_STATE_ACK_DETECT;
3130
3131 /* fallthru */
3132 case ANEG_STATE_ACK_DETECT:
3133 if (ap->ack_match != 0) {
3134 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3135 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3136 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3137 } else {
3138 ap->state = ANEG_STATE_AN_ENABLE;
3139 }
3140 } else if (ap->ability_match != 0 &&
3141 ap->rxconfig == 0) {
3142 ap->state = ANEG_STATE_AN_ENABLE;
3143 }
3144 break;
3145
3146 case ANEG_STATE_COMPLETE_ACK_INIT:
3147 if (ap->rxconfig & ANEG_CFG_INVAL) {
3148 ret = ANEG_FAILED;
3149 break;
3150 }
3151 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3152 MR_LP_ADV_HALF_DUPLEX |
3153 MR_LP_ADV_SYM_PAUSE |
3154 MR_LP_ADV_ASYM_PAUSE |
3155 MR_LP_ADV_REMOTE_FAULT1 |
3156 MR_LP_ADV_REMOTE_FAULT2 |
3157 MR_LP_ADV_NEXT_PAGE |
3158 MR_TOGGLE_RX |
3159 MR_NP_RX);
3160 if (ap->rxconfig & ANEG_CFG_FD)
3161 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3162 if (ap->rxconfig & ANEG_CFG_HD)
3163 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3164 if (ap->rxconfig & ANEG_CFG_PS1)
3165 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3166 if (ap->rxconfig & ANEG_CFG_PS2)
3167 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3168 if (ap->rxconfig & ANEG_CFG_RF1)
3169 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3170 if (ap->rxconfig & ANEG_CFG_RF2)
3171 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3172 if (ap->rxconfig & ANEG_CFG_NP)
3173 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3174
3175 ap->link_time = ap->cur_time;
3176
3177 ap->flags ^= (MR_TOGGLE_TX);
3178 if (ap->rxconfig & 0x0008)
3179 ap->flags |= MR_TOGGLE_RX;
3180 if (ap->rxconfig & ANEG_CFG_NP)
3181 ap->flags |= MR_NP_RX;
3182 ap->flags |= MR_PAGE_RX;
3183
3184 ap->state = ANEG_STATE_COMPLETE_ACK;
3185 ret = ANEG_TIMER_ENAB;
3186 break;
3187
3188 case ANEG_STATE_COMPLETE_ACK:
3189 if (ap->ability_match != 0 &&
3190 ap->rxconfig == 0) {
3191 ap->state = ANEG_STATE_AN_ENABLE;
3192 break;
3193 }
3194 delta = ap->cur_time - ap->link_time;
3195 if (delta > ANEG_STATE_SETTLE_TIME) {
3196 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3197 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3198 } else {
3199 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3200 !(ap->flags & MR_NP_RX)) {
3201 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3202 } else {
3203 ret = ANEG_FAILED;
3204 }
3205 }
3206 }
3207 break;
3208
3209 case ANEG_STATE_IDLE_DETECT_INIT:
3210 ap->link_time = ap->cur_time;
3211 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3212 tw32_f(MAC_MODE, tp->mac_mode);
3213 udelay(40);
3214
3215 ap->state = ANEG_STATE_IDLE_DETECT;
3216 ret = ANEG_TIMER_ENAB;
3217 break;
3218
3219 case ANEG_STATE_IDLE_DETECT:
3220 if (ap->ability_match != 0 &&
3221 ap->rxconfig == 0) {
3222 ap->state = ANEG_STATE_AN_ENABLE;
3223 break;
3224 }
3225 delta = ap->cur_time - ap->link_time;
3226 if (delta > ANEG_STATE_SETTLE_TIME) {
3227 /* XXX another gem from the Broadcom driver :( */
3228 ap->state = ANEG_STATE_LINK_OK;
3229 }
3230 break;
3231
3232 case ANEG_STATE_LINK_OK:
3233 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3234 ret = ANEG_DONE;
3235 break;
3236
3237 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3238 /* ??? unimplemented */
3239 break;
3240
3241 case ANEG_STATE_NEXT_PAGE_WAIT:
3242 /* ??? unimplemented */
3243 break;
3244
3245 default:
3246 ret = ANEG_FAILED;
3247 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249
3250 return ret;
3251}
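
/*
 * A minimal, self-contained userspace sketch (a separate standalone
 * program, not driver code): it shows how a received fiber-autoneg
 * config word maps onto the MR_LP_ADV_* flags using the same bit
 * values as the ANEG_CFG_* masks above, mirroring what the state
 * machine does in ANEG_STATE_COMPLETE_ACK_INIT.  The sample word is
 * an arbitrary illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define CFG_FD	0x00002000	/* same value as ANEG_CFG_FD */
#define CFG_HD	0x00004000	/* same value as ANEG_CFG_HD */
#define CFG_PS1	0x00008000	/* same value as ANEG_CFG_PS1 */
#define CFG_PS2	0x00000001	/* same value as ANEG_CFG_PS2 */
#define CFG_ACK	0x00000040	/* same value as ANEG_CFG_ACK */

int main(void)
{
	uint32_t rxconfig = CFG_FD | CFG_PS1 | CFG_ACK;

	printf("lp full duplex: %d\n", !!(rxconfig & CFG_FD));
	printf("lp half duplex: %d\n", !!(rxconfig & CFG_HD));
	printf("lp sym pause:   %d\n", !!(rxconfig & CFG_PS1));
	printf("lp asym pause:  %d\n", !!(rxconfig & CFG_PS2));
	printf("ack received:   %d\n", !!(rxconfig & CFG_ACK));
	return 0;
}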
3252
Matt Carlson5be73b42007-12-20 20:09:29 -08003253static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254{
3255 int res = 0;
3256 struct tg3_fiber_aneginfo aninfo;
3257 int status = ANEG_FAILED;
3258 unsigned int tick;
3259 u32 tmp;
3260
3261 tw32_f(MAC_TX_AUTO_NEG, 0);
3262
3263 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3264 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3265 udelay(40);
3266
3267 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3268 udelay(40);
3269
3270 memset(&aninfo, 0, sizeof(aninfo));
3271 aninfo.flags |= MR_AN_ENABLE;
3272 aninfo.state = ANEG_STATE_UNKNOWN;
3273 aninfo.cur_time = 0;
3274 tick = 0;
3275 while (++tick < 195000) {
3276 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3277 if (status == ANEG_DONE || status == ANEG_FAILED)
3278 break;
3279
3280 udelay(1);
3281 }
3282
3283 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3284 tw32_f(MAC_MODE, tp->mac_mode);
3285 udelay(40);
3286
Matt Carlson5be73b42007-12-20 20:09:29 -08003287 *txflags = aninfo.txconfig;
3288 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
3290 if (status == ANEG_DONE &&
3291 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3292 MR_LP_ADV_FULL_DUPLEX)))
3293 res = 1;
3294
3295 return res;
3296}
3297
3298static void tg3_init_bcm8002(struct tg3 *tp)
3299{
3300 u32 mac_status = tr32(MAC_STATUS);
3301 int i;
3302
3303 /* Reset when initializing for the first time or when we have a link. */
3304 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3305 !(mac_status & MAC_STATUS_PCS_SYNCED))
3306 return;
3307
3308 /* Set PLL lock range. */
3309 tg3_writephy(tp, 0x16, 0x8007);
3310
3311 /* SW reset */
3312 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3313
3314 /* Wait for reset to complete. */
3315 /* XXX schedule_timeout() ... */
3316 for (i = 0; i < 500; i++)
3317 udelay(10);
3318
3319 /* Config mode; select PMA/Ch 1 regs. */
3320 tg3_writephy(tp, 0x10, 0x8411);
3321
3322 /* Enable auto-lock and comdet, select txclk for tx. */
3323 tg3_writephy(tp, 0x11, 0x0a10);
3324
3325 tg3_writephy(tp, 0x18, 0x00a0);
3326 tg3_writephy(tp, 0x16, 0x41ff);
3327
3328 /* Assert and deassert POR. */
3329 tg3_writephy(tp, 0x13, 0x0400);
3330 udelay(40);
3331 tg3_writephy(tp, 0x13, 0x0000);
3332
3333 tg3_writephy(tp, 0x11, 0x0a50);
3334 udelay(40);
3335 tg3_writephy(tp, 0x11, 0x0a10);
3336
3337 /* Wait for signal to stabilize */
3338 /* XXX schedule_timeout() ... */
3339 for (i = 0; i < 15000; i++)
3340 udelay(10);
3341
3342 /* Deselect the channel register so we can read the PHYID
3343 * later.
3344 */
3345 tg3_writephy(tp, 0x10, 0x8011);
3346}
3347
3348static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3349{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003350 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 u32 sg_dig_ctrl, sg_dig_status;
3352 u32 serdes_cfg, expected_sg_dig_ctrl;
3353 int workaround, port_a;
3354 int current_link_up;
3355
3356 serdes_cfg = 0;
3357 expected_sg_dig_ctrl = 0;
3358 workaround = 0;
3359 port_a = 1;
3360 current_link_up = 0;
3361
3362 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3363 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3364 workaround = 1;
3365 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3366 port_a = 0;
3367
3368 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3369 /* preserve bits 20-23 for voltage regulator */
3370 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3371 }
3372
3373 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3374
3375 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003376 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 if (workaround) {
3378 u32 val = serdes_cfg;
3379
3380 if (port_a)
3381 val |= 0xc010000;
3382 else
3383 val |= 0x4010000;
3384 tw32_f(MAC_SERDES_CFG, val);
3385 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003386
3387 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 }
3389 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3390 tg3_setup_flow_control(tp, 0, 0);
3391 current_link_up = 1;
3392 }
3393 goto out;
3394 }
3395
3396 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003397 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
Matt Carlson82cd3d12007-12-20 20:09:00 -08003399 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3400 if (flowctrl & ADVERTISE_1000XPAUSE)
3401 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3402 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3403 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404
3405 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003406 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3407 tp->serdes_counter &&
3408 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3409 MAC_STATUS_RCVD_CFG)) ==
3410 MAC_STATUS_PCS_SYNCED)) {
3411 tp->serdes_counter--;
3412 current_link_up = 1;
3413 goto out;
3414 }
3415restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 if (workaround)
3417 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003418 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 udelay(5);
3420 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3421
Michael Chan3d3ebe72006-09-27 15:59:15 -07003422 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3423 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3425 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003426 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 mac_status = tr32(MAC_STATUS);
3428
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003429 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003431 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432
Matt Carlson82cd3d12007-12-20 20:09:00 -08003433 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3434 local_adv |= ADVERTISE_1000XPAUSE;
3435 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3436 local_adv |= ADVERTISE_1000XPSE_ASYM;
3437
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003438 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003439 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003440 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003441 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442
3443 tg3_setup_flow_control(tp, local_adv, remote_adv);
3444 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003445 tp->serdes_counter = 0;
3446 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003447 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003448 if (tp->serdes_counter)
3449 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 else {
3451 if (workaround) {
3452 u32 val = serdes_cfg;
3453
3454 if (port_a)
3455 val |= 0xc010000;
3456 else
3457 val |= 0x4010000;
3458
3459 tw32_f(MAC_SERDES_CFG, val);
3460 }
3461
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003462 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 udelay(40);
3464
3465 /* Link parallel detection - link is up
3466  * only if we have PCS_SYNC and we are not
3467  * receiving config code words. */
3468 mac_status = tr32(MAC_STATUS);
3469 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3470 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3471 tg3_setup_flow_control(tp, 0, 0);
3472 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003473 tp->tg3_flags2 |=
3474 TG3_FLG2_PARALLEL_DETECT;
3475 tp->serdes_counter =
3476 SERDES_PARALLEL_DET_TIMEOUT;
3477 } else
3478 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 }
3480 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003481 } else {
3482 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3483 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 }
3485
3486out:
3487 return current_link_up;
3488}
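
/*
 * Background sketch (a separate standalone userspace program, not
 * driver code): the conventional IEEE 802.3 pause-resolution rules
 * that local/remote advertisement bits like the ones gathered above
 * feed into.  This is a hedged illustration of the standard behavior;
 * the driver's actual policy lives in tg3_setup_flow_control(),
 * defined elsewhere in this file.
 */
#include <stdio.h>

static void resolve(int lcl_pause, int lcl_asym, int rmt_pause, int rmt_asym)
{
	int tx = 0, rx = 0;	/* do we send / honor pause frames? */

	if (lcl_pause && rmt_pause) {
		tx = rx = 1;			/* symmetric pause */
	} else if (lcl_asym && rmt_asym) {
		if (lcl_pause)
			rx = 1;			/* we honor pause frames only */
		else if (rmt_pause)
			tx = 1;			/* we send pause frames only */
	}
	printf("lcl(P=%d,A=%d) rmt(P=%d,A=%d) -> tx=%d rx=%d\n",
	       lcl_pause, lcl_asym, rmt_pause, rmt_asym, tx, rx);
}

int main(void)
{
	resolve(1, 0, 1, 0);	/* both ends symmetric: pause both ways */
	resolve(1, 1, 0, 1);	/* we honor pause, do not send it */
	resolve(0, 1, 1, 1);	/* we send pause, do not honor it */
	resolve(1, 0, 0, 1);	/* no common mode: no pause */
	return 0;
}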
3489
3490static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3491{
3492 int current_link_up = 0;
3493
Michael Chan5cf64b82007-05-05 12:11:21 -07003494 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496
3497 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003498 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003500
Matt Carlson5be73b42007-12-20 20:09:29 -08003501 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3502 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503
Matt Carlson5be73b42007-12-20 20:09:29 -08003504 if (txflags & ANEG_CFG_PS1)
3505 local_adv |= ADVERTISE_1000XPAUSE;
3506 if (txflags & ANEG_CFG_PS2)
3507 local_adv |= ADVERTISE_1000XPSE_ASYM;
3508
3509 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3510 remote_adv |= LPA_1000XPAUSE;
3511 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3512 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513
3514 tg3_setup_flow_control(tp, local_adv, remote_adv);
3515
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516 current_link_up = 1;
3517 }
3518 for (i = 0; i < 30; i++) {
3519 udelay(20);
3520 tw32_f(MAC_STATUS,
3521 (MAC_STATUS_SYNC_CHANGED |
3522 MAC_STATUS_CFG_CHANGED));
3523 udelay(40);
3524 if ((tr32(MAC_STATUS) &
3525 (MAC_STATUS_SYNC_CHANGED |
3526 MAC_STATUS_CFG_CHANGED)) == 0)
3527 break;
3528 }
3529
3530 mac_status = tr32(MAC_STATUS);
3531 if (current_link_up == 0 &&
3532 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3533 !(mac_status & MAC_STATUS_RCVD_CFG))
3534 current_link_up = 1;
3535 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003536 tg3_setup_flow_control(tp, 0, 0);
3537
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 /* Forcing 1000FD link up. */
3539 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540
3541 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3542 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003543
3544 tw32_f(MAC_MODE, tp->mac_mode);
3545 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 }
3547
3548out:
3549 return current_link_up;
3550}
3551
3552static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3553{
3554 u32 orig_pause_cfg;
3555 u16 orig_active_speed;
3556 u8 orig_active_duplex;
3557 u32 mac_status;
3558 int current_link_up;
3559 int i;
3560
Matt Carlson8d018622007-12-20 20:05:44 -08003561 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 orig_active_speed = tp->link_config.active_speed;
3563 orig_active_duplex = tp->link_config.active_duplex;
3564
3565 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3566 netif_carrier_ok(tp->dev) &&
3567 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3568 mac_status = tr32(MAC_STATUS);
3569 mac_status &= (MAC_STATUS_PCS_SYNCED |
3570 MAC_STATUS_SIGNAL_DET |
3571 MAC_STATUS_CFG_CHANGED |
3572 MAC_STATUS_RCVD_CFG);
3573 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3574 MAC_STATUS_SIGNAL_DET)) {
3575 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3576 MAC_STATUS_CFG_CHANGED));
3577 return 0;
3578 }
3579 }
3580
3581 tw32_f(MAC_TX_AUTO_NEG, 0);
3582
3583 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3584 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3585 tw32_f(MAC_MODE, tp->mac_mode);
3586 udelay(40);
3587
3588 if (tp->phy_id == PHY_ID_BCM8002)
3589 tg3_init_bcm8002(tp);
3590
3591 /* Enable link change event even when serdes polling. */
3592 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3593 udelay(40);
3594
3595 current_link_up = 0;
3596 mac_status = tr32(MAC_STATUS);
3597
3598 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3599 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3600 else
3601 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3602
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 tp->hw_status->status =
3604 (SD_STATUS_UPDATED |
3605 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3606
3607 for (i = 0; i < 100; i++) {
3608 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3609 MAC_STATUS_CFG_CHANGED));
3610 udelay(5);
3611 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003612 MAC_STATUS_CFG_CHANGED |
3613 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 break;
3615 }
3616
3617 mac_status = tr32(MAC_STATUS);
3618 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3619 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003620 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3621 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622 tw32_f(MAC_MODE, (tp->mac_mode |
3623 MAC_MODE_SEND_CONFIGS));
3624 udelay(1);
3625 tw32_f(MAC_MODE, tp->mac_mode);
3626 }
3627 }
3628
3629 if (current_link_up == 1) {
3630 tp->link_config.active_speed = SPEED_1000;
3631 tp->link_config.active_duplex = DUPLEX_FULL;
3632 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3633 LED_CTRL_LNKLED_OVERRIDE |
3634 LED_CTRL_1000MBPS_ON));
3635 } else {
3636 tp->link_config.active_speed = SPEED_INVALID;
3637 tp->link_config.active_duplex = DUPLEX_INVALID;
3638 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3639 LED_CTRL_LNKLED_OVERRIDE |
3640 LED_CTRL_TRAFFIC_OVERRIDE));
3641 }
3642
3643 if (current_link_up != netif_carrier_ok(tp->dev)) {
3644 if (current_link_up)
3645 netif_carrier_on(tp->dev);
3646 else
3647 netif_carrier_off(tp->dev);
3648 tg3_link_report(tp);
3649 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003650 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 if (orig_pause_cfg != now_pause_cfg ||
3652 orig_active_speed != tp->link_config.active_speed ||
3653 orig_active_duplex != tp->link_config.active_duplex)
3654 tg3_link_report(tp);
3655 }
3656
3657 return 0;
3658}
3659
Michael Chan747e8f82005-07-25 12:33:22 -07003660static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3661{
3662 int current_link_up, err = 0;
3663 u32 bmsr, bmcr;
3664 u16 current_speed;
3665 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003666 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003667
3668 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3669 tw32_f(MAC_MODE, tp->mac_mode);
3670 udelay(40);
3671
3672 tw32(MAC_EVENT, 0);
3673
3674 tw32_f(MAC_STATUS,
3675 (MAC_STATUS_SYNC_CHANGED |
3676 MAC_STATUS_CFG_CHANGED |
3677 MAC_STATUS_MI_COMPLETION |
3678 MAC_STATUS_LNKSTATE_CHANGED));
3679 udelay(40);
3680
3681 if (force_reset)
3682 tg3_phy_reset(tp);
3683
3684 current_link_up = 0;
3685 current_speed = SPEED_INVALID;
3686 current_duplex = DUPLEX_INVALID;
3687
3688 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3689 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3691 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3692 bmsr |= BMSR_LSTATUS;
3693 else
3694 bmsr &= ~BMSR_LSTATUS;
3695 }
Michael Chan747e8f82005-07-25 12:33:22 -07003696
3697 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3698
3699 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003700 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003701 /* do nothing, just check for link up at the end */
3702 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3703 u32 adv, new_adv;
3704
3705 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3706 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3707 ADVERTISE_1000XPAUSE |
3708 ADVERTISE_1000XPSE_ASYM |
3709 ADVERTISE_SLCT);
3710
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003711 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003712
3713 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3714 new_adv |= ADVERTISE_1000XHALF;
3715 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3716 new_adv |= ADVERTISE_1000XFULL;
3717
3718 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3719 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3720 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3721 tg3_writephy(tp, MII_BMCR, bmcr);
3722
3723 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003724 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003725 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3726
3727 return err;
3728 }
3729 } else {
3730 u32 new_bmcr;
3731
3732 bmcr &= ~BMCR_SPEED1000;
3733 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3734
3735 if (tp->link_config.duplex == DUPLEX_FULL)
3736 new_bmcr |= BMCR_FULLDPLX;
3737
3738 if (new_bmcr != bmcr) {
3739 /* BMCR_SPEED1000 is a reserved bit that needs
3740 * to be set on write.
3741 */
3742 new_bmcr |= BMCR_SPEED1000;
3743
3744 /* Force a linkdown */
3745 if (netif_carrier_ok(tp->dev)) {
3746 u32 adv;
3747
3748 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3749 adv &= ~(ADVERTISE_1000XFULL |
3750 ADVERTISE_1000XHALF |
3751 ADVERTISE_SLCT);
3752 tg3_writephy(tp, MII_ADVERTISE, adv);
3753 tg3_writephy(tp, MII_BMCR, bmcr |
3754 BMCR_ANRESTART |
3755 BMCR_ANENABLE);
3756 udelay(10);
3757 netif_carrier_off(tp->dev);
3758 }
3759 tg3_writephy(tp, MII_BMCR, new_bmcr);
3760 bmcr = new_bmcr;
3761 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3762 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003763 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3764 ASIC_REV_5714) {
3765 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3766 bmsr |= BMSR_LSTATUS;
3767 else
3768 bmsr &= ~BMSR_LSTATUS;
3769 }
Michael Chan747e8f82005-07-25 12:33:22 -07003770 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3771 }
3772 }
3773
3774 if (bmsr & BMSR_LSTATUS) {
3775 current_speed = SPEED_1000;
3776 current_link_up = 1;
3777 if (bmcr & BMCR_FULLDPLX)
3778 current_duplex = DUPLEX_FULL;
3779 else
3780 current_duplex = DUPLEX_HALF;
3781
Matt Carlsonef167e22007-12-20 20:10:01 -08003782 local_adv = 0;
3783 remote_adv = 0;
3784
Michael Chan747e8f82005-07-25 12:33:22 -07003785 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003786 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003787
3788 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3789 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3790 common = local_adv & remote_adv;
3791 if (common & (ADVERTISE_1000XHALF |
3792 ADVERTISE_1000XFULL)) {
3793 if (common & ADVERTISE_1000XFULL)
3794 current_duplex = DUPLEX_FULL;
3795 else
3796 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003797 }
3798 else
3799 current_link_up = 0;
3800 }
3801 }
3802
Matt Carlsonef167e22007-12-20 20:10:01 -08003803 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3804 tg3_setup_flow_control(tp, local_adv, remote_adv);
3805
Michael Chan747e8f82005-07-25 12:33:22 -07003806 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3807 if (tp->link_config.active_duplex == DUPLEX_HALF)
3808 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3809
3810 tw32_f(MAC_MODE, tp->mac_mode);
3811 udelay(40);
3812
3813 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3814
3815 tp->link_config.active_speed = current_speed;
3816 tp->link_config.active_duplex = current_duplex;
3817
3818 if (current_link_up != netif_carrier_ok(tp->dev)) {
3819 if (current_link_up)
3820 netif_carrier_on(tp->dev);
3821 else {
3822 netif_carrier_off(tp->dev);
3823 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3824 }
3825 tg3_link_report(tp);
3826 }
3827 return err;
3828}
3829
3830static void tg3_serdes_parallel_detect(struct tg3 *tp)
3831{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003832 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003833 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003834 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003835 return;
3836 }
3837 if (!netif_carrier_ok(tp->dev) &&
3838 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3839 u32 bmcr;
3840
3841 tg3_readphy(tp, MII_BMCR, &bmcr);
3842 if (bmcr & BMCR_ANENABLE) {
3843 u32 phy1, phy2;
3844
3845 /* Select shadow register 0x1f */
3846 tg3_writephy(tp, 0x1c, 0x7c00);
3847 tg3_readphy(tp, 0x1c, &phy1);
3848
3849 /* Select expansion interrupt status register */
3850 tg3_writephy(tp, 0x17, 0x0f01);
3851 tg3_readphy(tp, 0x15, &phy2);
3852 tg3_readphy(tp, 0x15, &phy2);
3853
3854 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3855 /* We have signal detect and are not receiving
3856 * config code words, so the link is up by parallel
3857 * detection.
3858 */
3859
3860 bmcr &= ~BMCR_ANENABLE;
3861 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3862 tg3_writephy(tp, MII_BMCR, bmcr);
3863 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3864 }
3865 }
3866 }
3867 else if (netif_carrier_ok(tp->dev) &&
3868 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3869 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3870 u32 phy2;
3871
3872 /* Select expansion interrupt status register */
3873 tg3_writephy(tp, 0x17, 0x0f01);
3874 tg3_readphy(tp, 0x15, &phy2);
3875 if (phy2 & 0x20) {
3876 u32 bmcr;
3877
3878 /* Config code words received, turn on autoneg. */
3879 tg3_readphy(tp, MII_BMCR, &bmcr);
3880 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3881
3882 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3883
3884 }
3885 }
3886}
3887
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3889{
3890 int err;
3891
3892 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3893 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003894 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3895 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 } else {
3897 err = tg3_setup_copper_phy(tp, force_reset);
3898 }
3899
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003900 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003901 u32 val, scale;
3902
3903 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3904 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3905 scale = 65;
3906 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3907 scale = 6;
3908 else
3909 scale = 12;
3910
3911 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3912 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3913 tw32(GRC_MISC_CFG, val);
3914 }
3915
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 if (tp->link_config.active_speed == SPEED_1000 &&
3917 tp->link_config.active_duplex == DUPLEX_HALF)
3918 tw32(MAC_TX_LENGTHS,
3919 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3920 (6 << TX_LENGTHS_IPG_SHIFT) |
3921 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3922 else
3923 tw32(MAC_TX_LENGTHS,
3924 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3925 (6 << TX_LENGTHS_IPG_SHIFT) |
3926 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3927
3928 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3929 if (netif_carrier_ok(tp->dev)) {
3930 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003931 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 } else {
3933 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3934 }
3935 }
3936
Matt Carlson8ed5d972007-05-07 00:25:49 -07003937 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3938 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3939 if (!netif_carrier_ok(tp->dev))
3940 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3941 tp->pwrmgmt_thresh;
3942 else
3943 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3944 tw32(PCIE_PWR_MGMT_THRESH, val);
3945 }
3946
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 return err;
3948}
3949
Michael Chandf3e6542006-05-26 17:48:07 -07003950/* This is called whenever we suspect that the system chipset is re-
3951 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3952 * is bogus tx completions. We try to recover by setting the
3953 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3954 * in the workqueue.
3955 */
3956static void tg3_tx_recover(struct tg3 *tp)
3957{
3958 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3959 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3960
3961 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3962 "mapped I/O cycles to the network device, attempting to "
3963 "recover. Please report the problem to the driver maintainer "
3964 "and include system chipset information.\n", tp->dev->name);
3965
3966 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003967 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003968 spin_unlock(&tp->lock);
3969}
3970
Michael Chan1b2a7202006-08-07 21:46:02 -07003971static inline u32 tg3_tx_avail(struct tg3 *tp)
3972{
3973 smp_mb();
3974 return (tp->tx_pending -
3975 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3976}
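
/*
 * Self-contained userspace sketch (a separate standalone program, not
 * driver code) of the tg3_tx_avail() arithmetic above: with a
 * power-of-two ring size, (prod - cons) & (size - 1) counts the
 * in-flight descriptors correctly even after the 32-bit indices wrap.
 * The ring size below is an assumption chosen only for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 512u

static uint32_t tx_avail(uint32_t pending, uint32_t prod, uint32_t cons)
{
	return pending - ((prod - cons) & (RING_SIZE - 1));
}

int main(void)
{
	/* 100 descriptors queued, no wrap: 412 slots remain. */
	printf("%u\n", tx_avail(RING_SIZE, 100, 0));
	/* Producer index wrapped past UINT32_MAX: 10 in flight, 502 remain. */
	printf("%u\n", tx_avail(RING_SIZE, 5, 0xfffffffbu));
	return 0;
}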
3977
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978/* Tigon3 never reports partial packet sends. So we do not
3979 * need special logic to handle SKBs that have not had all
3980 * of their frags sent yet, like SunGEM does.
3981 */
3982static void tg3_tx(struct tg3 *tp)
3983{
3984 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3985 u32 sw_idx = tp->tx_cons;
3986
3987 while (sw_idx != hw_idx) {
3988 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3989 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003990 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991
Michael Chandf3e6542006-05-26 17:48:07 -07003992 if (unlikely(skb == NULL)) {
3993 tg3_tx_recover(tp);
3994 return;
3995 }
3996
David S. Miller90079ce2008-09-11 04:52:51 -07003997 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998
3999 ri->skb = NULL;
4000
4001 sw_idx = NEXT_TX(sw_idx);
4002
4003 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07004005 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4006 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 sw_idx = NEXT_TX(sw_idx);
4008 }
4009
David S. Millerf47c11e2005-06-24 20:18:35 -07004010 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07004011
4012 if (unlikely(tx_bug)) {
4013 tg3_tx_recover(tp);
4014 return;
4015 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016 }
4017
4018 tp->tx_cons = sw_idx;
4019
Michael Chan1b2a7202006-08-07 21:46:02 -07004020 /* Need to make the tx_cons update visible to tg3_start_xmit()
4021 * before checking for netif_queue_stopped(). Without the
4022 * memory barrier, there is a small possibility that tg3_start_xmit()
4023 * will miss it and cause the queue to be stopped forever.
4024 */
4025 smp_mb();
4026
4027 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004028 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07004029 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004030 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07004031 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07004032 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07004033 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07004034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035}
4036
4037/* Returns size of skb allocated or < 0 on error.
4038 *
4039 * We only need to fill in the address because the other members
4040 * of the RX descriptor are invariant, see tg3_init_rings.
4041 *
4042 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4043 * posting buffers we only dirty the first cache line of the RX
4044 * descriptor (containing the address). Whereas for the RX status
4045 * buffers the cpu only reads the last cacheline of the RX descriptor
4046 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4047 */
4048static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4049 int src_idx, u32 dest_idx_unmasked)
4050{
4051 struct tg3_rx_buffer_desc *desc;
4052 struct ring_info *map, *src_map;
4053 struct sk_buff *skb;
4054 dma_addr_t mapping;
4055 int skb_size, dest_idx;
4056
4057 src_map = NULL;
4058 switch (opaque_key) {
4059 case RXD_OPAQUE_RING_STD:
4060 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4061 desc = &tp->rx_std[dest_idx];
4062 map = &tp->rx_std_buffers[dest_idx];
4063 if (src_idx >= 0)
4064 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07004065 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066 break;
4067
4068 case RXD_OPAQUE_RING_JUMBO:
4069 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4070 desc = &tp->rx_jumbo[dest_idx];
4071 map = &tp->rx_jumbo_buffers[dest_idx];
4072 if (src_idx >= 0)
4073 src_map = &tp->rx_jumbo_buffers[src_idx];
4074 skb_size = RX_JUMBO_PKT_BUF_SZ;
4075 break;
4076
4077 default:
4078 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004079 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080
4081 /* Do not overwrite any of the map or rp information
4082 * until we are sure we can commit to a new buffer.
4083 *
4084 * Callers depend upon this behavior and assume that
4085 * we leave everything unchanged if we fail.
4086 */
David S. Millera20e9c62006-07-31 22:38:16 -07004087 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 if (skb == NULL)
4089 return -ENOMEM;
4090
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091 skb_reserve(skb, tp->rx_offset);
4092
4093 mapping = pci_map_single(tp->pdev, skb->data,
4094 skb_size - tp->rx_offset,
4095 PCI_DMA_FROMDEVICE);
4096
4097 map->skb = skb;
4098 pci_unmap_addr_set(map, mapping, mapping);
4099
4100 if (src_map != NULL)
4101 src_map->skb = NULL;
4102
4103 desc->addr_hi = ((u64)mapping >> 32);
4104 desc->addr_lo = ((u64)mapping & 0xffffffff);
4105
4106 return skb_size;
4107}
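
/*
 * Userspace sketch (a separate standalone program, not driver code) of
 * the 64-bit DMA address split performed when filling the RX
 * descriptor above (desc->addr_hi / desc->addr_lo).  The address
 * value is an arbitrary example.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mapping = 0x0000000123456000ULL;
	uint32_t addr_hi = (uint32_t)(mapping >> 32);
	uint32_t addr_lo = (uint32_t)(mapping & 0xffffffff);

	printf("addr_hi=0x%08x addr_lo=0x%08x\n", addr_hi, addr_lo);
	printf("recombined=0x%016llx\n",
	       ((unsigned long long)addr_hi << 32) | addr_lo);
	return 0;
}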
4108
4109/* We only need to copy over the address because the other
4110 * members of the RX descriptor are invariant. See notes above
4111 * tg3_alloc_rx_skb for full details.
4112 */
4113static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4114 int src_idx, u32 dest_idx_unmasked)
4115{
4116 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4117 struct ring_info *src_map, *dest_map;
4118 int dest_idx;
4119
4120 switch (opaque_key) {
4121 case RXD_OPAQUE_RING_STD:
4122 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4123 dest_desc = &tp->rx_std[dest_idx];
4124 dest_map = &tp->rx_std_buffers[dest_idx];
4125 src_desc = &tp->rx_std[src_idx];
4126 src_map = &tp->rx_std_buffers[src_idx];
4127 break;
4128
4129 case RXD_OPAQUE_RING_JUMBO:
4130 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4131 dest_desc = &tp->rx_jumbo[dest_idx];
4132 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4133 src_desc = &tp->rx_jumbo[src_idx];
4134 src_map = &tp->rx_jumbo_buffers[src_idx];
4135 break;
4136
4137 default:
4138 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004139 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140
4141 dest_map->skb = src_map->skb;
4142 pci_unmap_addr_set(dest_map, mapping,
4143 pci_unmap_addr(src_map, mapping));
4144 dest_desc->addr_hi = src_desc->addr_hi;
4145 dest_desc->addr_lo = src_desc->addr_lo;
4146
4147 src_map->skb = NULL;
4148}
4149
4150#if TG3_VLAN_TAG_USED
4151static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4152{
4153 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4154}
4155#endif
4156
4157/* The RX ring scheme is composed of multiple rings which post fresh
4158 * buffers to the chip, and one special ring the chip uses to report
4159 * status back to the host.
4160 *
4161 * The special ring reports the status of received packets to the
4162 * host. The chip does not write into the original descriptor the
4163 * RX buffer was obtained from. The chip simply takes the original
4164 * descriptor as provided by the host, updates the status and length
4165 * field, then writes this into the next status ring entry.
4166 *
4167 * Each ring the host uses to post buffers to the chip is described
4168 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4169 * it is first placed into the on-chip ram. When the packet's length
4170 * is known, it walks down the TG3_BDINFO entries to select the ring.
4171 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4172 * whose MAXLEN covers the new packet's length is chosen (see the sketch after tg3_rx below).
4173 *
4174 * The "separate ring for rx status" scheme may sound queer, but it makes
4175 * sense from a cache coherency perspective. If only the host writes
4176 * to the buffer post rings, and only the chip writes to the rx status
4177 * rings, then cache lines never move beyond shared-modified state.
4178 * If both the host and chip were to write into the same ring, cache line
4179 * eviction could occur since both entities want it in an exclusive state.
4180 */
4181static int tg3_rx(struct tg3 *tp, int budget)
4182{
Michael Chanf92905d2006-06-29 20:14:29 -07004183 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004184 u32 sw_idx = tp->rx_rcb_ptr;
4185 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 int received;
4187
4188 hw_idx = tp->hw_status->idx[0].rx_producer;
4189 /*
4190 * We need to order the read of hw_idx and the read of
4191 * the opaque cookie.
4192 */
4193 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 work_mask = 0;
4195 received = 0;
4196 while (sw_idx != hw_idx && budget > 0) {
4197 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4198 unsigned int len;
4199 struct sk_buff *skb;
4200 dma_addr_t dma_addr;
4201 u32 opaque_key, desc_idx, *post_ptr;
4202
4203 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4204 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4205 if (opaque_key == RXD_OPAQUE_RING_STD) {
4206 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4207 mapping);
4208 skb = tp->rx_std_buffers[desc_idx].skb;
4209 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004210 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4212 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4213 mapping);
4214 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4215 post_ptr = &tp->rx_jumbo_ptr;
4216 }
4217 else {
4218 goto next_pkt_nopost;
4219 }
4220
4221 work_mask |= opaque_key;
4222
4223 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4224 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4225 drop_it:
4226 tg3_recycle_rx(tp, opaque_key,
4227 desc_idx, *post_ptr);
4228 drop_it_no_recycle:
4229 /* Other statistics kept track of by card. */
4230 tp->net_stats.rx_dropped++;
4231 goto next_pkt;
4232 }
4233
4234 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4235
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004236 if (len > RX_COPY_THRESHOLD
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 && tp->rx_offset == 2
4238 /* rx_offset != 2 iff this is a 5701 card running
4239 * in PCI-X mode [see tg3_get_invariants()] */
4240 ) {
4241 int skb_size;
4242
4243 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4244 desc_idx, *post_ptr);
4245 if (skb_size < 0)
4246 goto drop_it;
4247
4248 pci_unmap_single(tp->pdev, dma_addr,
4249 skb_size - tp->rx_offset,
4250 PCI_DMA_FROMDEVICE);
4251
4252 skb_put(skb, len);
4253 } else {
4254 struct sk_buff *copy_skb;
4255
4256 tg3_recycle_rx(tp, opaque_key,
4257 desc_idx, *post_ptr);
4258
David S. Millera20e9c62006-07-31 22:38:16 -07004259 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260 if (copy_skb == NULL)
4261 goto drop_it_no_recycle;
4262
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 skb_reserve(copy_skb, 2);
4264 skb_put(copy_skb, len);
4265 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004266 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4268
4269 /* We'll reuse the original ring buffer. */
4270 skb = copy_skb;
4271 }
4272
4273 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4274 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4275 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4276 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4277 skb->ip_summed = CHECKSUM_UNNECESSARY;
4278 else
4279 skb->ip_summed = CHECKSUM_NONE;
4280
4281 skb->protocol = eth_type_trans(skb, tp->dev);
4282#if TG3_VLAN_TAG_USED
4283 if (tp->vlgrp != NULL &&
4284 desc->type_flags & RXD_FLAG_VLAN) {
4285 tg3_vlan_rx(tp, skb,
4286 desc->err_vlan & RXD_VLAN_MASK);
4287 } else
4288#endif
4289 netif_receive_skb(skb);
4290
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 received++;
4292 budget--;
4293
4294next_pkt:
4295 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004296
4297 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4298 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4299
4300 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4301 TG3_64BIT_REG_LOW, idx);
4302 work_mask &= ~RXD_OPAQUE_RING_STD;
4303 rx_std_posted = 0;
4304 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004306 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004307 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004308
4309 /* Refresh hw_idx to see if there is new work */
4310 if (sw_idx == hw_idx) {
4311 hw_idx = tp->hw_status->idx[0].rx_producer;
4312 rmb();
4313 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 }
4315
4316 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004317 tp->rx_rcb_ptr = sw_idx;
4318 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319
4320 /* Refill RX ring(s). */
4321 if (work_mask & RXD_OPAQUE_RING_STD) {
4322 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4323 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4324 sw_idx);
4325 }
4326 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4327 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4328 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4329 sw_idx);
4330 }
4331 mmiowb();
4332
4333 return received;
4334}
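
/*
 * Userspace sketch (a separate standalone program, not driver code) of
 * the ring-selection rule described in the comment above tg3_rx(): the
 * first TG3_BDINFO whose MAXLEN covers the packet length is chosen.
 * The two MAXLEN values below are illustrative assumptions, not the
 * chip's actual configuration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int maxlen[] = { 1536, 9018 };	/* e.g. std ring, jumbo ring */
	unsigned int pkt_len = 4000;
	unsigned int i;

	for (i = 0; i < sizeof(maxlen) / sizeof(maxlen[0]); i++) {
		if (pkt_len <= maxlen[i]) {
			printf("%u-byte packet -> ring %u\n", pkt_len, i);
			return 0;
		}
	}
	printf("%u-byte packet -> no ring large enough\n", pkt_len);
	return 0;
}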
4335
David S. Miller6f535762007-10-11 18:08:29 -07004336static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 /* handle link change and other phy events */
4341 if (!(tp->tg3_flags &
4342 (TG3_FLAG_USE_LINKCHG_REG |
4343 TG3_FLAG_POLL_SERDES))) {
4344 if (sblk->status & SD_STATUS_LINK_CHG) {
4345 sblk->status = SD_STATUS_UPDATED |
4346 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004347 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004348 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4349 tw32_f(MAC_STATUS,
4350 (MAC_STATUS_SYNC_CHANGED |
4351 MAC_STATUS_CFG_CHANGED |
4352 MAC_STATUS_MI_COMPLETION |
4353 MAC_STATUS_LNKSTATE_CHANGED));
4354 udelay(40);
4355 } else
4356 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004357 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358 }
4359 }
4360
4361 /* run TX completion thread */
4362 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004364 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004365 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366 }
4367
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368 /* run RX thread, within the bounds set by NAPI.
4369 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004370 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004372 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004373 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374
David S. Miller6f535762007-10-11 18:08:29 -07004375 return work_done;
4376}
David S. Millerf7383c22005-05-18 22:50:53 -07004377
David S. Miller6f535762007-10-11 18:08:29 -07004378static int tg3_poll(struct napi_struct *napi, int budget)
4379{
4380 struct tg3 *tp = container_of(napi, struct tg3, napi);
4381 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004382 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004383
4384 while (1) {
4385 work_done = tg3_poll_work(tp, work_done, budget);
4386
4387 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4388 goto tx_recovery;
4389
4390 if (unlikely(work_done >= budget))
4391 break;
4392
Michael Chan4fd7ab52007-10-12 01:39:50 -07004393 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4394 /* tp->last_tag is used in tg3_restart_ints() below
4395 * to tell the hw how much work has been processed,
4396 * so we must read it before checking for more work.
4397 */
4398 tp->last_tag = sblk->status_tag;
4399 rmb();
4400 } else
4401 sblk->status &= ~SD_STATUS_UPDATED;
4402
David S. Miller6f535762007-10-11 18:08:29 -07004403 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004404 netif_rx_complete(tp->dev, napi);
4405 tg3_restart_ints(tp);
4406 break;
4407 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408 }
4409
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004410 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004411
4412tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004413 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004414 netif_rx_complete(tp->dev, napi);
4415 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004416 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417}
4418
David S. Millerf47c11e2005-06-24 20:18:35 -07004419static void tg3_irq_quiesce(struct tg3 *tp)
4420{
4421 BUG_ON(tp->irq_sync);
4422
4423 tp->irq_sync = 1;
4424 smp_mb();
4425
4426 synchronize_irq(tp->pdev->irq);
4427}
4428
4429static inline int tg3_irq_sync(struct tg3 *tp)
4430{
4431 return tp->irq_sync;
4432}
4433
4434/* Fully shut down all tg3 driver activity elsewhere in the system.
4435 * If irq_sync is non-zero, then the IRQ handler is quiesced (synchronized
4436 * with) as well. Most of the time, this is not necessary except when
4437 * shutting down the device.
4438 */
4439static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4440{
Michael Chan46966542007-07-11 19:47:19 -07004441 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004442 if (irq_sync)
4443 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004444}
4445
4446static inline void tg3_full_unlock(struct tg3 *tp)
4447{
David S. Millerf47c11e2005-06-24 20:18:35 -07004448 spin_unlock_bh(&tp->lock);
4449}
4450
Michael Chanfcfa0a32006-03-20 22:28:41 -08004451/* One-shot MSI handler - Chip automatically disables interrupt
4452 * after sending MSI so the driver doesn't have to do it.
4453 */
David Howells7d12e782006-10-05 14:55:46 +01004454static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004455{
4456 struct net_device *dev = dev_id;
4457 struct tg3 *tp = netdev_priv(dev);
4458
4459 prefetch(tp->hw_status);
4460 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4461
4462 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004463 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004464
4465 return IRQ_HANDLED;
4466}
4467
Michael Chan88b06bc2005-04-21 17:13:25 -07004468/* MSI ISR - No need to check for interrupt sharing and no need to
4469 * flush status block and interrupt mailbox. PCI ordering rules
4470 * guarantee that MSI will arrive after the status block.
4471 */
David Howells7d12e782006-10-05 14:55:46 +01004472static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004473{
4474 struct net_device *dev = dev_id;
4475 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004476
Michael Chan61487482005-09-05 17:53:19 -07004477 prefetch(tp->hw_status);
4478 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004479 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004480 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004481 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004482 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004483 * NIC to stop sending us irqs, engaging "in-intr-handler"
4484 * event coalescing.
4485 */
4486 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004487 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004488 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004489
Michael Chan88b06bc2005-04-21 17:13:25 -07004490 return IRQ_RETVAL(1);
4491}
4492
David Howells7d12e782006-10-05 14:55:46 +01004493static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494{
4495 struct net_device *dev = dev_id;
4496 struct tg3 *tp = netdev_priv(dev);
4497 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 unsigned int handled = 1;
4499
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500 /* In INTx mode, it is possible for the interrupt to arrive at
4501 * the CPU before the status block that was posted prior to the interrupt.
4502 * Reading the PCI State register will confirm whether the
4503 * interrupt is ours and will flush the status block.
4504 */
Michael Chand18edcb2007-03-24 20:57:11 -07004505 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4506 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4507 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4508 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004509 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004510 }
Michael Chand18edcb2007-03-24 20:57:11 -07004511 }
4512
4513 /*
4514 * Writing any value to intr-mbox-0 clears PCI INTA# and
4515 * chip-internal interrupt pending events.
4516 * Writing non-zero to intr-mbox-0 additionally tells the
4517 * NIC to stop sending us irqs, engaging "in-intr-handler"
4518 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004519 *
4520 * Flush the mailbox to de-assert the IRQ immediately to prevent
4521 * spurious interrupts. The flush impacts performance but
4522 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004523 */
Michael Chanc04cb342007-05-07 00:26:15 -07004524 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004525 if (tg3_irq_sync(tp))
4526 goto out;
4527 sblk->status &= ~SD_STATUS_UPDATED;
4528 if (likely(tg3_has_work(tp))) {
4529 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004530 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004531 } else {
4532 /* No work, shared interrupt perhaps? re-enable
4533 * interrupts, and flush that PCI write
4534 */
4535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4536 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004537 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004538out:
David S. Millerfac9b832005-05-18 22:46:34 -07004539 return IRQ_RETVAL(handled);
4540}
4541
David Howells7d12e782006-10-05 14:55:46 +01004542static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004543{
4544 struct net_device *dev = dev_id;
4545 struct tg3 *tp = netdev_priv(dev);
4546 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004547 unsigned int handled = 1;
4548
David S. Millerfac9b832005-05-18 22:46:34 -07004549 /* In INTx mode, it is possible for the interrupt to arrive at
4550 * the CPU before the status block that was posted prior to the interrupt.
4551 * Reading the PCI State register will confirm whether the
4552 * interrupt is ours and will flush the status block.
4553 */
Michael Chand18edcb2007-03-24 20:57:11 -07004554 if (unlikely(sblk->status_tag == tp->last_tag)) {
4555 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4556 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4557 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004558 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559 }
Michael Chand18edcb2007-03-24 20:57:11 -07004560 }
4561
4562 /*
4563 * Writing any value to intr-mbox-0 clears PCI INTA# and
4564 * chip-internal interrupt pending events.
4565 * Writing non-zero to intr-mbox-0 additionally tells the
4566 * NIC to stop sending us irqs, engaging "in-intr-handler"
4567 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004568 *
4569 * Flush the mailbox to de-assert the IRQ immediately to prevent
4570 * spurious interrupts. The flush impacts performance but
4571 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004572 */
Michael Chanc04cb342007-05-07 00:26:15 -07004573 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004574 if (tg3_irq_sync(tp))
4575 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004576 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004577 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4578 /* Update last_tag to mark that this status has been
4579 * seen. Because interrupt may be shared, we may be
4580 * racing with tg3_poll(), so only update last_tag
4581 * if tg3_poll() is not scheduled.
4582 */
4583 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004584 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004586out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587 return IRQ_RETVAL(handled);
4588}
4589
Michael Chan79381092005-04-21 17:13:59 -07004590/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004591static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004592{
4593 struct net_device *dev = dev_id;
4594 struct tg3 *tp = netdev_priv(dev);
4595 struct tg3_hw_status *sblk = tp->hw_status;
4596
Michael Chanf9804dd2005-09-27 12:13:10 -07004597 if ((sblk->status & SD_STATUS_UPDATED) ||
4598 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004599 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004600 return IRQ_RETVAL(1);
4601 }
4602 return IRQ_RETVAL(0);
4603}
4604
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004605static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004606static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607
Michael Chanb9ec6c12006-07-25 16:37:27 -07004608/* Restart hardware after configuration changes, self-test, etc.
4609 * Invoked with tp->lock held.
4610 */
4611static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004612 __releases(tp->lock)
4613 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004614{
4615 int err;
4616
4617 err = tg3_init_hw(tp, reset_phy);
4618 if (err) {
4619 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4620 "aborting.\n", tp->dev->name);
4621 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4622 tg3_full_unlock(tp);
4623 del_timer_sync(&tp->timer);
4624 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004625 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004626 dev_close(tp->dev);
4627 tg3_full_lock(tp, 0);
4628 }
4629 return err;
4630}
4631
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632#ifdef CONFIG_NET_POLL_CONTROLLER
4633static void tg3_poll_controller(struct net_device *dev)
4634{
Michael Chan88b06bc2005-04-21 17:13:25 -07004635 struct tg3 *tp = netdev_priv(dev);
4636
David Howells7d12e782006-10-05 14:55:46 +01004637 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638}
4639#endif
4640
David Howellsc4028952006-11-22 14:57:56 +00004641static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004642{
David Howellsc4028952006-11-22 14:57:56 +00004643 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004644 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645 unsigned int restart_timer;
4646
Michael Chan7faa0062006-02-02 17:29:28 -08004647 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004648
4649 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004650 tg3_full_unlock(tp);
4651 return;
4652 }
4653
4654 tg3_full_unlock(tp);
4655
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004656 tg3_phy_stop(tp);
4657
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 tg3_netif_stop(tp);
4659
David S. Millerf47c11e2005-06-24 20:18:35 -07004660 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661
4662 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4663 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4664
Michael Chandf3e6542006-05-26 17:48:07 -07004665 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4666 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4667 tp->write32_rx_mbox = tg3_write_flush_reg32;
4668 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4669 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4670 }
4671
Michael Chan944d9802005-05-29 14:57:48 -07004672 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004673 err = tg3_init_hw(tp, 1);
4674 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004675 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676
4677 tg3_netif_start(tp);
4678
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679 if (restart_timer)
4680 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004681
Michael Chanb9ec6c12006-07-25 16:37:27 -07004682out:
Michael Chan7faa0062006-02-02 17:29:28 -08004683 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004684
4685 if (!err)
4686 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004687}
4688
Michael Chanb0408752007-02-13 12:18:30 -08004689static void tg3_dump_short_state(struct tg3 *tp)
4690{
4691 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4692 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4693 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4694 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4695}
4696
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697static void tg3_tx_timeout(struct net_device *dev)
4698{
4699 struct tg3 *tp = netdev_priv(dev);
4700
Michael Chanb0408752007-02-13 12:18:30 -08004701 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004702 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4703 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004704 tg3_dump_short_state(tp);
4705 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706
4707 schedule_work(&tp->reset_task);
4708}
4709
Michael Chanc58ec932005-09-17 00:46:27 -07004710/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4711static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4712{
4713 u32 base = (u32) mapping & 0xffffffff;
4714
4715 return ((base > 0xffffdcc0) &&
4716 (base + len + 8 < base));
4717}
4718
Michael Chan72f2afb2006-03-06 19:28:35 -08004719/* Test for DMA addresses > 40-bit */
4720static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4721 int len)
4722{
4723#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004724 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004725 return (((u64) mapping + len) > DMA_40BIT_MASK);
4726 return 0;
4727#else
4728 return 0;
4729#endif
4730}
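
Both helpers above exist because of hardware DMA errata: a transmit buffer must not
straddle a 4 GB boundary, and on affected chips must not extend past the 40-bit address
limit. The standalone sketch below shows the underlying arithmetic on plain 64-bit
integers; note the driver's own 4 GB test is a deliberately conservative variant that
works on the truncated low 32 bits of the mapping.

/* Standalone illustration of the 4 GB-crossing and 40-bit-limit checks
 * (simplified; plain integers stand in for dma_addr_t).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [addr, addr + len) crosses a 4 GB boundary. len must be non-zero. */
static bool crosses_4g(uint64_t addr, uint32_t len)
{
        return (addr >> 32) != ((addr + len - 1) >> 32);
}

/* True if the buffer extends beyond what a 40-bit DMA mask can address. */
static bool exceeds_40bit(uint64_t addr, uint32_t len)
{
        return (addr + len) > ((1ULL << 40) - 1);
}

int main(void)
{
        /* 256 bytes ending just past the 4 GB line. */
        printf("crosses 4G: %d\n", crosses_4g(0xFFFFFF80ULL, 256));            /* 1 */
        /* Same length well inside a single 4 GB window. */
        printf("crosses 4G: %d\n", crosses_4g(0x10000000ULL, 256));            /* 0 */
        /* Mapping that spills over the 40-bit limit. */
        printf("exceeds 40-bit: %d\n", exceeds_40bit((1ULL << 40) - 64, 256)); /* 1 */
        return 0;
}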
4731
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4733
Michael Chan72f2afb2006-03-06 19:28:35 -08004734/* Workaround 4GB and 40-bit hardware DMA bugs. */
4735static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004736 u32 last_plus_one, u32 *start,
4737 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738{
Matt Carlson41588ba2008-04-19 18:12:33 -07004739 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004740 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004742 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743
Matt Carlson41588ba2008-04-19 18:12:33 -07004744 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4745 new_skb = skb_copy(skb, GFP_ATOMIC);
4746 else {
4747 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4748
4749 new_skb = skb_copy_expand(skb,
4750 skb_headroom(skb) + more_headroom,
4751 skb_tailroom(skb), GFP_ATOMIC);
4752 }
4753
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004755 ret = -1;
4756 } else {
4757 /* New SKB is guaranteed to be linear. */
4758 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004759 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4760 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4761
Michael Chanc58ec932005-09-17 00:46:27 -07004762 /* Make sure new skb does not cross any 4G boundaries.
4763 * Drop the packet if it does.
4764 */
David S. Miller90079ce2008-09-11 04:52:51 -07004765 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004766 if (!ret)
4767 skb_dma_unmap(&tp->pdev->dev, new_skb,
4768 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004769 ret = -1;
4770 dev_kfree_skb(new_skb);
4771 new_skb = NULL;
4772 } else {
4773 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4774 base_flags, 1 | (mss << 1));
4775 *start = NEXT_TX(entry);
4776 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777 }
4778
Linus Torvalds1da177e2005-04-16 15:20:36 -07004779 /* Now clean up the sw ring entries. */
4780 i = 0;
4781 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782 if (i == 0) {
4783 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784 } else {
4785 tp->tx_buffers[entry].skb = NULL;
4786 }
4787 entry = NEXT_TX(entry);
4788 i++;
4789 }
4790
David S. Miller90079ce2008-09-11 04:52:51 -07004791 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792 dev_kfree_skb(skb);
4793
Michael Chanc58ec932005-09-17 00:46:27 -07004794 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795}
4796
4797static void tg3_set_txd(struct tg3 *tp, int entry,
4798 dma_addr_t mapping, int len, u32 flags,
4799 u32 mss_and_is_end)
4800{
4801 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4802 int is_end = (mss_and_is_end & 0x1);
4803 u32 mss = (mss_and_is_end >> 1);
4804 u32 vlan_tag = 0;
4805
4806 if (is_end)
4807 flags |= TXD_FLAG_END;
4808 if (flags & TXD_FLAG_VLAN) {
4809 vlan_tag = flags >> 16;
4810 flags &= 0xffff;
4811 }
4812 vlan_tag |= (mss << TXD_MSS_SHIFT);
4813
4814 txd->addr_hi = ((u64) mapping >> 32);
4815 txd->addr_lo = ((u64) mapping & 0xffffffff);
4816 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4817 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4818}
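
tg3_set_txd() above packs the "last fragment" flag into bit 0 of mss_and_is_end with the
MSS in the bits above it, and splits the 64-bit DMA address into the two 32-bit
descriptor words. A standalone sketch of that packing; the values and field positions
are illustrative, not the Tigon3 register layout:

/* Demo of the (is_end) | (mss << 1) encoding used by the transmit paths
 * above, plus the addr_hi/addr_lo split of a 64-bit DMA address.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_mss_is_end(uint32_t mss, int is_end)
{
        return (is_end ? 1u : 0u) | (mss << 1);
}

int main(void)
{
        uint32_t packed = pack_mss_is_end(1448, 1);
        uint64_t mapping = 0x0000000123456780ULL;

        printf("is_end=%u mss=%u\n",
               (unsigned int)(packed & 0x1), (unsigned int)(packed >> 1));
        printf("addr_hi=0x%08x addr_lo=0x%08x\n",
               (unsigned int)(mapping >> 32),
               (unsigned int)(mapping & 0xffffffff));
        return 0;
}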
4819
Michael Chan5a6f3072006-03-20 22:28:05 -08004820/* hard_start_xmit for devices that don't have any bugs and
4821 * support TG3_FLG2_HW_TSO_2 only.
4822 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4824{
4825 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004827 struct skb_shared_info *sp;
4828 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004829
4830 len = skb_headlen(skb);
4831
Michael Chan00b70502006-06-17 21:58:45 -07004832 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004833 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004834 * interrupt. Furthermore, IRQ processing runs lockless so we have
4835 * no IRQ context deadlocks to worry about either. Rejoice!
4836 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004837 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004838 if (!netif_queue_stopped(dev)) {
4839 netif_stop_queue(dev);
4840
4841 /* This is a hard error, log it. */
4842 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4843 "queue awake!\n", dev->name);
4844 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004845 return NETDEV_TX_BUSY;
4846 }
4847
4848 entry = tp->tx_prod;
4849 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004850 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004851 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004852 int tcp_opt_len, ip_tcp_len;
4853
4854 if (skb_header_cloned(skb) &&
4855 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4856 dev_kfree_skb(skb);
4857 goto out_unlock;
4858 }
4859
Michael Chanb0026622006-07-03 19:42:14 -07004860 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4861 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4862 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004863 struct iphdr *iph = ip_hdr(skb);
4864
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004865 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004866 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004867
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004868 iph->check = 0;
4869 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004870 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4871 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004872
4873 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4874 TXD_FLAG_CPU_POST_DMA);
4875
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004876 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004877
Michael Chan5a6f3072006-03-20 22:28:05 -08004878 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004879 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004880 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004881#if TG3_VLAN_TAG_USED
4882 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4883 base_flags |= (TXD_FLAG_VLAN |
4884 (vlan_tx_tag_get(skb) << 16));
4885#endif
4886
David S. Miller90079ce2008-09-11 04:52:51 -07004887 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4888 dev_kfree_skb(skb);
4889 goto out_unlock;
4890 }
4891
4892 sp = skb_shinfo(skb);
4893
4894 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004895
4896 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004897
4898 tg3_set_txd(tp, entry, mapping, len, base_flags,
4899 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4900
4901 entry = NEXT_TX(entry);
4902
4903 /* Now loop through additional data fragments, and queue them. */
4904 if (skb_shinfo(skb)->nr_frags > 0) {
4905 unsigned int i, last;
4906
4907 last = skb_shinfo(skb)->nr_frags - 1;
4908 for (i = 0; i <= last; i++) {
4909 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4910
4911 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004912 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004913 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004914
4915 tg3_set_txd(tp, entry, mapping, len,
4916 base_flags, (i == last) | (mss << 1));
4917
4918 entry = NEXT_TX(entry);
4919 }
4920 }
4921
4922 /* Packets are ready, update Tx producer idx local and on card. */
4923 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4924
4925 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004926 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004927 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004928 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004929 netif_wake_queue(tp->dev);
4930 }
4931
4932out_unlock:
4933 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004934
4935 dev->trans_start = jiffies;
4936
4937 return NETDEV_TX_OK;
4938}
4939
Michael Chan52c0fd82006-06-29 20:15:54 -07004940static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4941
4942/* Use GSO to workaround a rare TSO bug that may be triggered when the
4943 * TSO header is greater than 80 bytes.
4944 */
4945static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4946{
4947 struct sk_buff *segs, *nskb;
4948
4949 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004950 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004951 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004952 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4953 return NETDEV_TX_BUSY;
4954
4955 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004956 }
4957
4958 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004959 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004960 goto tg3_tso_bug_end;
4961
4962 do {
4963 nskb = segs;
4964 segs = segs->next;
4965 nskb->next = NULL;
4966 tg3_start_xmit_dma_bug(nskb, tp->dev);
4967 } while (segs);
4968
4969tg3_tso_bug_end:
4970 dev_kfree_skb(skb);
4971
4972 return NETDEV_TX_OK;
4973}
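
The fallback above is taken when the full Ethernet + IP + TCP header is larger than the
80 bytes the affected chips can handle for TSO (see the check in tg3_start_xmit_dma_bug()
below). A simplified standalone version of that length test; the names and the fixed
14-byte Ethernet header are assumptions of the sketch:

/* Toy version of the "is the TSO header too big for the hardware?" test. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_ETH_HLEN   14      /* untagged Ethernet header */

static bool needs_gso_fallback(unsigned int ip_hdr_len,
                               unsigned int tcp_hdr_len,
                               unsigned int tcp_opt_len)
{
        unsigned int hdr_len = ip_hdr_len + tcp_hdr_len + tcp_opt_len;

        /* Affected chips mishandle TSO when the combined L2+L3+L4
         * header exceeds 80 bytes, so fall back to software GSO.
         */
        return (DEMO_ETH_HLEN + hdr_len) > 80;
}

int main(void)
{
        printf("%d\n", needs_gso_fallback(20, 20, 12));  /* 66 bytes -> 0 */
        printf("%d\n", needs_gso_fallback(60, 20, 40));  /* 134 bytes -> 1 */
        return 0;
}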
Michael Chan52c0fd82006-06-29 20:15:54 -07004974
Michael Chan5a6f3072006-03-20 22:28:05 -08004975/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4976 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4977 */
4978static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4979{
4980 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004981 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004982 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004984 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004985
4986 len = skb_headlen(skb);
4987
Michael Chan00b70502006-06-17 21:58:45 -07004988 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004989 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004990 * interrupt. Furthermore, IRQ processing runs lockless so we have
4991 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004993 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004994 if (!netif_queue_stopped(dev)) {
4995 netif_stop_queue(dev);
4996
4997 /* This is a hard error, log it. */
4998 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4999 "queue awake!\n", dev->name);
5000 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001 return NETDEV_TX_BUSY;
5002 }
5003
5004 entry = tp->tx_prod;
5005 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07005006 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07005009 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005010 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07005011 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012
5013 if (skb_header_cloned(skb) &&
5014 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5015 dev_kfree_skb(skb);
5016 goto out_unlock;
5017 }
5018
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07005019 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03005020 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005021
Michael Chan52c0fd82006-06-29 20:15:54 -07005022 hdr_len = ip_tcp_len + tcp_opt_len;
5023 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08005024 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07005025 return (tg3_tso_bug(tp, skb));
5026
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5028 TXD_FLAG_CPU_POST_DMA);
5029
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005030 iph = ip_hdr(skb);
5031 iph->check = 0;
5032 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005034 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07005036 } else
5037 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5038 iph->daddr, 0,
5039 IPPROTO_TCP,
5040 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041
5042 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5043 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005044 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045 int tsflags;
5046
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005047 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048 mss |= (tsflags << 11);
5049 }
5050 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005051 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005052 int tsflags;
5053
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005054 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 base_flags |= tsflags << 12;
5056 }
5057 }
5058 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059#if TG3_VLAN_TAG_USED
5060 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5061 base_flags |= (TXD_FLAG_VLAN |
5062 (vlan_tx_tag_get(skb) << 16));
5063#endif
5064
David S. Miller90079ce2008-09-11 04:52:51 -07005065 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5066 dev_kfree_skb(skb);
5067 goto out_unlock;
5068 }
5069
5070 sp = skb_shinfo(skb);
5071
5072 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073
5074 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075
5076 would_hit_hwbug = 0;
5077
Matt Carlson41588ba2008-04-19 18:12:33 -07005078 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5079 would_hit_hwbug = 1;
5080 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005081 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082
5083 tg3_set_txd(tp, entry, mapping, len, base_flags,
5084 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5085
5086 entry = NEXT_TX(entry);
5087
5088 /* Now loop through additional data fragments, and queue them. */
5089 if (skb_shinfo(skb)->nr_frags > 0) {
5090 unsigned int i, last;
5091
5092 last = skb_shinfo(skb)->nr_frags - 1;
5093 for (i = 0; i <= last; i++) {
5094 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5095
5096 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005097 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098
5099 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100
Michael Chanc58ec932005-09-17 00:46:27 -07005101 if (tg3_4g_overflow_test(mapping, len))
5102 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103
Michael Chan72f2afb2006-03-06 19:28:35 -08005104 if (tg3_40bit_overflow_test(tp, mapping, len))
5105 would_hit_hwbug = 1;
5106
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5108 tg3_set_txd(tp, entry, mapping, len,
5109 base_flags, (i == last)|(mss << 1));
5110 else
5111 tg3_set_txd(tp, entry, mapping, len,
5112 base_flags, (i == last));
5113
5114 entry = NEXT_TX(entry);
5115 }
5116 }
5117
5118 if (would_hit_hwbug) {
5119 u32 last_plus_one = entry;
5120 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121
Michael Chanc58ec932005-09-17 00:46:27 -07005122 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5123 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124
5125 /* If the workaround fails due to memory/mapping
5126 * failure, silently drop this packet.
5127 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005128 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005129 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130 goto out_unlock;
5131
5132 entry = start;
5133 }
5134
5135 /* Packets are ready, update Tx producer idx local and on card. */
5136 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5137
5138 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005139 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005141 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005142 netif_wake_queue(tp->dev);
5143 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144
5145out_unlock:
5146 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147
5148 dev->trans_start = jiffies;
5149
5150 return NETDEV_TX_OK;
5151}
5152
5153static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5154 int new_mtu)
5155{
5156 dev->mtu = new_mtu;
5157
Michael Chanef7f5ec2005-07-25 12:32:25 -07005158 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005159 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005160 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5161 ethtool_op_set_tso(dev, 0);
5162 }
5163 else
5164 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5165 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005166 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005167 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005168 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005169 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170}
5171
5172static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5173{
5174 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005175 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176
5177 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5178 return -EINVAL;
5179
5180 if (!netif_running(dev)) {
5181 /* We'll just catch it later when the
5182 * device is up'd.
5183 */
5184 tg3_set_mtu(dev, tp, new_mtu);
5185 return 0;
5186 }
5187
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005188 tg3_phy_stop(tp);
5189
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005191
5192 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193
Michael Chan944d9802005-05-29 14:57:48 -07005194 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195
5196 tg3_set_mtu(dev, tp, new_mtu);
5197
Michael Chanb9ec6c12006-07-25 16:37:27 -07005198 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
Michael Chanb9ec6c12006-07-25 16:37:27 -07005200 if (!err)
5201 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202
David S. Millerf47c11e2005-06-24 20:18:35 -07005203 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005204
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005205 if (!err)
5206 tg3_phy_start(tp);
5207
Michael Chanb9ec6c12006-07-25 16:37:27 -07005208 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209}
5210
5211/* Free up pending packets in all rx/tx rings.
5212 *
5213 * The chip has been shut down and the driver detached from
5214	 * the network stack, so no interrupts or new tx packets will
5215 * end up in the driver. tp->{tx,}lock is not held and we are not
5216 * in an interrupt context and thus may sleep.
5217 */
5218static void tg3_free_rings(struct tg3 *tp)
5219{
5220 struct ring_info *rxp;
5221 int i;
5222
5223 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5224 rxp = &tp->rx_std_buffers[i];
5225
5226 if (rxp->skb == NULL)
5227 continue;
5228 pci_unmap_single(tp->pdev,
5229 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005230 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231 PCI_DMA_FROMDEVICE);
5232 dev_kfree_skb_any(rxp->skb);
5233 rxp->skb = NULL;
5234 }
5235
5236 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5237 rxp = &tp->rx_jumbo_buffers[i];
5238
5239 if (rxp->skb == NULL)
5240 continue;
5241 pci_unmap_single(tp->pdev,
5242 pci_unmap_addr(rxp, mapping),
5243 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5244 PCI_DMA_FROMDEVICE);
5245 dev_kfree_skb_any(rxp->skb);
5246 rxp->skb = NULL;
5247 }
5248
5249 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5250 struct tx_ring_info *txp;
5251 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252
5253 txp = &tp->tx_buffers[i];
5254 skb = txp->skb;
5255
5256 if (skb == NULL) {
5257 i++;
5258 continue;
5259 }
5260
David S. Miller90079ce2008-09-11 04:52:51 -07005261 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5262
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263 txp->skb = NULL;
5264
David S. Miller90079ce2008-09-11 04:52:51 -07005265 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266
5267 dev_kfree_skb_any(skb);
5268 }
5269}
5270
5271/* Initialize tx/rx rings for packet processing.
5272 *
5273 * The chip has been shut down and the driver detached from
5274	 * the network stack, so no interrupts or new tx packets will
5275 * end up in the driver. tp->{tx,}lock are held and thus
5276 * we may not sleep.
5277 */
Michael Chan32d8c572006-07-25 16:38:29 -07005278static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005279{
5280 u32 i;
5281
5282 /* Free up all the SKBs. */
5283 tg3_free_rings(tp);
5284
5285 /* Zero out all descriptors. */
5286 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5287 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5288 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5289 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5290
Michael Chan7e72aad2005-07-25 12:31:17 -07005291 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005292 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005293 (tp->dev->mtu > ETH_DATA_LEN))
5294 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5295
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296	/* Initialize invariants of the rings; we only set this
5297 * stuff once. This works because the card does not
5298 * write into the rx buffer posting rings.
5299 */
5300 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5301 struct tg3_rx_buffer_desc *rxd;
5302
5303 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005304 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 << RXD_LEN_SHIFT;
5306 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5307 rxd->opaque = (RXD_OPAQUE_RING_STD |
5308 (i << RXD_OPAQUE_INDEX_SHIFT));
5309 }
5310
Michael Chan0f893dc2005-07-25 12:30:38 -07005311 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5313 struct tg3_rx_buffer_desc *rxd;
5314
5315 rxd = &tp->rx_jumbo[i];
5316 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5317 << RXD_LEN_SHIFT;
5318 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5319 RXD_FLAG_JUMBO;
5320 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5321 (i << RXD_OPAQUE_INDEX_SHIFT));
5322 }
5323 }
5324
5325 /* Now allocate fresh SKBs for each rx ring. */
5326 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005327 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5328 printk(KERN_WARNING PFX
5329 "%s: Using a smaller RX standard ring, "
5330 "only %d out of %d buffers were allocated "
5331 "successfully.\n",
5332 tp->dev->name, i, tp->rx_pending);
5333 if (i == 0)
5334 return -ENOMEM;
5335 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005336 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338 }
5339
Michael Chan0f893dc2005-07-25 12:30:38 -07005340 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5342 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005343 -1, i) < 0) {
5344 printk(KERN_WARNING PFX
5345 "%s: Using a smaller RX jumbo ring, "
5346 "only %d out of %d buffers were "
5347 "allocated successfully.\n",
5348 tp->dev->name, i, tp->rx_jumbo_pending);
5349 if (i == 0) {
5350 tg3_free_rings(tp);
5351 return -ENOMEM;
5352 }
5353 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005354 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005355 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005356 }
5357 }
Michael Chan32d8c572006-07-25 16:38:29 -07005358 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005359}
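
The "opaque" field stamped on each RX descriptor above encodes both the ring a buffer
belongs to and its index, so the completion path can find the matching software state
without a search. A tiny standalone sketch of such a cookie; the bit layout is made up
for illustration and is not the Tigon3 RXD_OPAQUE_* definition:

/* Pack a ring id and an index into one 32-bit cookie, in the spirit of
 * RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT) above.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SHIFT 24
#define DEMO_INDEX_MASK 0x00ffffffu

static uint32_t cookie_make(uint32_t ring, uint32_t index)
{
        return (ring << DEMO_RING_SHIFT) | (index & DEMO_INDEX_MASK);
}

int main(void)
{
        uint32_t c = cookie_make(1 /* "std" ring */, 42);

        printf("ring=%u index=%u\n",
               (unsigned int)(c >> DEMO_RING_SHIFT),
               (unsigned int)(c & DEMO_INDEX_MASK));
        return 0;
}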
5360
5361/*
5362 * Must not be invoked with interrupt sources disabled and
5363	 * the hardware shut down.
5364 */
5365static void tg3_free_consistent(struct tg3 *tp)
5366{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005367 kfree(tp->rx_std_buffers);
5368 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369 if (tp->rx_std) {
5370 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5371 tp->rx_std, tp->rx_std_mapping);
5372 tp->rx_std = NULL;
5373 }
5374 if (tp->rx_jumbo) {
5375 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5376 tp->rx_jumbo, tp->rx_jumbo_mapping);
5377 tp->rx_jumbo = NULL;
5378 }
5379 if (tp->rx_rcb) {
5380 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5381 tp->rx_rcb, tp->rx_rcb_mapping);
5382 tp->rx_rcb = NULL;
5383 }
5384 if (tp->tx_ring) {
5385 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5386 tp->tx_ring, tp->tx_desc_mapping);
5387 tp->tx_ring = NULL;
5388 }
5389 if (tp->hw_status) {
5390 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5391 tp->hw_status, tp->status_mapping);
5392 tp->hw_status = NULL;
5393 }
5394 if (tp->hw_stats) {
5395 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5396 tp->hw_stats, tp->stats_mapping);
5397 tp->hw_stats = NULL;
5398 }
5399}
5400
5401/*
5402 * Must not be invoked with interrupt sources disabled and
5403	 * the hardware shut down. Can sleep.
5404 */
5405static int tg3_alloc_consistent(struct tg3 *tp)
5406{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005407 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408 (TG3_RX_RING_SIZE +
5409 TG3_RX_JUMBO_RING_SIZE)) +
5410 (sizeof(struct tx_ring_info) *
5411 TG3_TX_RING_SIZE),
5412 GFP_KERNEL);
5413 if (!tp->rx_std_buffers)
5414 return -ENOMEM;
5415
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5417 tp->tx_buffers = (struct tx_ring_info *)
5418 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5419
5420 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5421 &tp->rx_std_mapping);
5422 if (!tp->rx_std)
5423 goto err_out;
5424
5425 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5426 &tp->rx_jumbo_mapping);
5427
5428 if (!tp->rx_jumbo)
5429 goto err_out;
5430
5431 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5432 &tp->rx_rcb_mapping);
5433 if (!tp->rx_rcb)
5434 goto err_out;
5435
5436 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5437 &tp->tx_desc_mapping);
5438 if (!tp->tx_ring)
5439 goto err_out;
5440
5441 tp->hw_status = pci_alloc_consistent(tp->pdev,
5442 TG3_HW_STATUS_SIZE,
5443 &tp->status_mapping);
5444 if (!tp->hw_status)
5445 goto err_out;
5446
5447 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5448 sizeof(struct tg3_hw_stats),
5449 &tp->stats_mapping);
5450 if (!tp->hw_stats)
5451 goto err_out;
5452
5453 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5454 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5455
5456 return 0;
5457
5458err_out:
5459 tg3_free_consistent(tp);
5460 return -ENOMEM;
5461}
5462
5463#define MAX_WAIT_CNT 1000
5464
5465/* To stop a block, clear the enable bit and poll till it
5466 * clears. tp->lock is held.
5467 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005468static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469{
5470 unsigned int i;
5471 u32 val;
5472
5473 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5474 switch (ofs) {
5475 case RCVLSC_MODE:
5476 case DMAC_MODE:
5477 case MBFREE_MODE:
5478 case BUFMGR_MODE:
5479 case MEMARB_MODE:
5480 /* We can't enable/disable these bits of the
5481 * 5705/5750, just say success.
5482 */
5483 return 0;
5484
5485 default:
5486 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005487 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005488 }
5489
5490 val = tr32(ofs);
5491 val &= ~enable_bit;
5492 tw32_f(ofs, val);
5493
5494 for (i = 0; i < MAX_WAIT_CNT; i++) {
5495 udelay(100);
5496 val = tr32(ofs);
5497 if ((val & enable_bit) == 0)
5498 break;
5499 }
5500
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005501 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005502 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5503 "ofs=%lx enable_bit=%x\n",
5504 ofs, enable_bit);
5505 return -ENODEV;
5506 }
5507
5508 return 0;
5509}
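
tg3_stop_block() above is the usual "clear the enable bit, then poll with a bounded
delay loop" idiom for quiescing a DMA engine. A minimal standalone version of that loop,
with stubbed accessors standing in for tr32()/tw32_f():

/* Clear-and-poll sketch; the "register" is just a variable here, so the
 * first poll already sees the bit clear. On real hardware the engine may
 * hold the bit for a while, which is what the bounded loop is for.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x5;                 /* bit 0 = "engine enabled" */

static uint32_t reg_read(void)      { return fake_reg; }
static void reg_write(uint32_t val) { fake_reg = val; }

static bool stop_block(uint32_t enable_bit, int max_polls)
{
        int i;

        reg_write(reg_read() & ~enable_bit);    /* request the stop */
        for (i = 0; i < max_polls; i++) {
                if (!(reg_read() & enable_bit)) /* engine has stopped */
                        return true;
                /* the driver udelay(100)s between polls */
        }
        return false;                           /* timed out */
}

int main(void)
{
        printf("stopped: %d\n", stop_block(0x1, 1000)); /* prints 1 */
        return 0;
}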
5510
5511/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005512static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005513{
5514 int i, err;
5515
5516 tg3_disable_ints(tp);
5517
5518 tp->rx_mode &= ~RX_MODE_ENABLE;
5519 tw32_f(MAC_RX_MODE, tp->rx_mode);
5520 udelay(10);
5521
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005522 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5523 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5524 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5525 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5526 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5527 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005529 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5530 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5531 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5532 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5533 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5534 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5535 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536
5537 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5538 tw32_f(MAC_MODE, tp->mac_mode);
5539 udelay(40);
5540
5541 tp->tx_mode &= ~TX_MODE_ENABLE;
5542 tw32_f(MAC_TX_MODE, tp->tx_mode);
5543
5544 for (i = 0; i < MAX_WAIT_CNT; i++) {
5545 udelay(100);
5546 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5547 break;
5548 }
5549 if (i >= MAX_WAIT_CNT) {
5550 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5551 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5552 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005553 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005554 }
5555
Michael Chane6de8ad2005-05-05 14:42:41 -07005556 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005557 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5558 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005559
5560 tw32(FTQ_RESET, 0xffffffff);
5561 tw32(FTQ_RESET, 0x00000000);
5562
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005563 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5564 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565
5566 if (tp->hw_status)
5567 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5568 if (tp->hw_stats)
5569 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5570
Linus Torvalds1da177e2005-04-16 15:20:36 -07005571 return err;
5572}
5573
5574/* tp->lock is held. */
5575static int tg3_nvram_lock(struct tg3 *tp)
5576{
5577 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5578 int i;
5579
Michael Chanec41c7d2006-01-17 02:40:55 -08005580 if (tp->nvram_lock_cnt == 0) {
5581 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5582 for (i = 0; i < 8000; i++) {
5583 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5584 break;
5585 udelay(20);
5586 }
5587 if (i == 8000) {
5588 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5589 return -ENODEV;
5590 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005591 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005592 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005593 }
5594 return 0;
5595}
5596
5597/* tp->lock is held. */
5598static void tg3_nvram_unlock(struct tg3 *tp)
5599{
Michael Chanec41c7d2006-01-17 02:40:55 -08005600 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5601 if (tp->nvram_lock_cnt > 0)
5602 tp->nvram_lock_cnt--;
5603 if (tp->nvram_lock_cnt == 0)
5604 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5605 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005606}
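
tg3_nvram_lock()/tg3_nvram_unlock() above layer a simple recursion count on top of the
hardware arbitration register: only the 0 -> 1 transition requests the semaphore and
only the final unlock releases it. A standalone sketch of that refcounted-lock pattern
with stubbed hardware operations (all names here are hypothetical):

/* Refcounted hardware-semaphore pattern, reduced to a user-space demo. */
#include <stdio.h>

static int hw_sem_held;                         /* stand-in for NVRAM_SWARB */

static void hw_sem_request(void) { hw_sem_held = 1; }
static void hw_sem_release(void) { hw_sem_held = 0; }

struct demo_nvram {
        int lock_cnt;
};

static void demo_nvram_lock(struct demo_nvram *nv)
{
        if (nv->lock_cnt == 0)          /* only the first holder arbitrates */
                hw_sem_request();
        nv->lock_cnt++;
}

static void demo_nvram_unlock(struct demo_nvram *nv)
{
        if (nv->lock_cnt > 0)
                nv->lock_cnt--;
        if (nv->lock_cnt == 0)          /* last holder drops the semaphore */
                hw_sem_release();
}

int main(void)
{
        struct demo_nvram nv = { 0 };

        demo_nvram_lock(&nv);
        demo_nvram_lock(&nv);           /* nested caller, no extra HW request */
        demo_nvram_unlock(&nv);
        printf("held after one unlock: %d\n", hw_sem_held);     /* 1 */
        demo_nvram_unlock(&nv);
        printf("held after final unlock: %d\n", hw_sem_held);   /* 0 */
        return 0;
}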
5607
5608/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005609static void tg3_enable_nvram_access(struct tg3 *tp)
5610{
5611 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5612 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5613 u32 nvaccess = tr32(NVRAM_ACCESS);
5614
5615 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5616 }
5617}
5618
5619/* tp->lock is held. */
5620static void tg3_disable_nvram_access(struct tg3 *tp)
5621{
5622 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5623 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5624 u32 nvaccess = tr32(NVRAM_ACCESS);
5625
5626 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5627 }
5628}
5629
Matt Carlson0d3031d2007-10-10 18:02:43 -07005630static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5631{
5632 int i;
5633 u32 apedata;
5634
5635 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5636 if (apedata != APE_SEG_SIG_MAGIC)
5637 return;
5638
5639 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005640 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005641 return;
5642
5643 /* Wait for up to 1 millisecond for APE to service previous event. */
5644 for (i = 0; i < 10; i++) {
5645 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5646 return;
5647
5648 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5649
5650 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5651 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5652 event | APE_EVENT_STATUS_EVENT_PENDING);
5653
5654 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5655
5656 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5657 break;
5658
5659 udelay(100);
5660 }
5661
5662 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5663 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5664}
5665
5666static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5667{
5668 u32 event;
5669 u32 apedata;
5670
5671 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5672 return;
5673
5674 switch (kind) {
5675 case RESET_KIND_INIT:
5676 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5677 APE_HOST_SEG_SIG_MAGIC);
5678 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5679 APE_HOST_SEG_LEN_MAGIC);
5680 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5681 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5682 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5683 APE_HOST_DRIVER_ID_MAGIC);
5684 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5685 APE_HOST_BEHAV_NO_PHYLOCK);
5686
5687 event = APE_EVENT_STATUS_STATE_START;
5688 break;
5689 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005690 /* With the interface we are currently using,
5691 * APE does not track driver state. Wiping
5692 * out the HOST SEGMENT SIGNATURE forces
5693 * the APE to assume OS absent status.
5694 */
5695 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5696
Matt Carlson0d3031d2007-10-10 18:02:43 -07005697 event = APE_EVENT_STATUS_STATE_UNLOAD;
5698 break;
5699 case RESET_KIND_SUSPEND:
5700 event = APE_EVENT_STATUS_STATE_SUSPEND;
5701 break;
5702 default:
5703 return;
5704 }
5705
5706 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5707
5708 tg3_ape_send_event(tp, event);
5709}
5710
Michael Chane6af3012005-04-21 17:12:05 -07005711/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005712static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5713{
David S. Millerf49639e2006-06-09 11:58:36 -07005714 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5715 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005716
5717 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5718 switch (kind) {
5719 case RESET_KIND_INIT:
5720 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5721 DRV_STATE_START);
5722 break;
5723
5724 case RESET_KIND_SHUTDOWN:
5725 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5726 DRV_STATE_UNLOAD);
5727 break;
5728
5729 case RESET_KIND_SUSPEND:
5730 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5731 DRV_STATE_SUSPEND);
5732 break;
5733
5734 default:
5735 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005736 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005737 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005738
5739 if (kind == RESET_KIND_INIT ||
5740 kind == RESET_KIND_SUSPEND)
5741 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005742}
5743
5744/* tp->lock is held. */
5745static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5746{
5747 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5748 switch (kind) {
5749 case RESET_KIND_INIT:
5750 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5751 DRV_STATE_START_DONE);
5752 break;
5753
5754 case RESET_KIND_SHUTDOWN:
5755 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5756 DRV_STATE_UNLOAD_DONE);
5757 break;
5758
5759 default:
5760 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005761 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005762 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005763
5764 if (kind == RESET_KIND_SHUTDOWN)
5765 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766}
5767
5768/* tp->lock is held. */
5769static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5770{
5771 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5772 switch (kind) {
5773 case RESET_KIND_INIT:
5774 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5775 DRV_STATE_START);
5776 break;
5777
5778 case RESET_KIND_SHUTDOWN:
5779 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5780 DRV_STATE_UNLOAD);
5781 break;
5782
5783 case RESET_KIND_SUSPEND:
5784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5785 DRV_STATE_SUSPEND);
5786 break;
5787
5788 default:
5789 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005790 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791 }
5792}
5793
Michael Chan7a6f4362006-09-27 16:03:31 -07005794static int tg3_poll_fw(struct tg3 *tp)
5795{
5796 int i;
5797 u32 val;
5798
Michael Chanb5d37722006-09-27 16:06:21 -07005799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005800 /* Wait up to 20ms for init done. */
5801 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005802 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5803 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005804 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005805 }
5806 return -ENODEV;
5807 }
5808
Michael Chan7a6f4362006-09-27 16:03:31 -07005809 /* Wait for firmware initialization to complete. */
5810 for (i = 0; i < 100000; i++) {
5811 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5812 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5813 break;
5814 udelay(10);
5815 }
5816
5817 /* Chip might not be fitted with firmware. Some Sun onboard
5818 * parts are configured like that. So don't signal the timeout
5819 * of the above loop as an error, but do report the lack of
5820 * running firmware once.
5821 */
5822 if (i >= 100000 &&
5823 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5824 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5825
5826 printk(KERN_INFO PFX "%s: No firmware running.\n",
5827 tp->dev->name);
5828 }
5829
5830 return 0;
5831}
5832
Michael Chanee6a99b2007-07-18 21:49:10 -07005833/* Save PCI command register before chip reset */
5834static void tg3_save_pci_state(struct tg3 *tp)
5835{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005836 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005837}
5838
5839/* Restore PCI state after chip reset */
5840static void tg3_restore_pci_state(struct tg3 *tp)
5841{
5842 u32 val;
5843
5844 /* Re-enable indirect register accesses. */
5845 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5846 tp->misc_host_ctrl);
5847
5848 /* Set MAX PCI retry to zero. */
5849 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5850 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5851 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5852 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005853 /* Allow reads and writes to the APE register and memory space. */
5854 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5855 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5856 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005857 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5858
Matt Carlson8a6eac92007-10-21 16:17:55 -07005859 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005860
Matt Carlsonfcb389d2008-11-03 16:55:44 -08005861 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5862 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5863 pcie_set_readrq(tp->pdev, 4096);
5864 else {
5865 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5866 tp->pci_cacheline_sz);
5867 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5868 tp->pci_lat_timer);
5869 }
Michael Chan114342f2007-10-15 02:12:26 -07005870 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005871
Michael Chanee6a99b2007-07-18 21:49:10 -07005872 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005873 if (tp->pcix_cap) {
5874 u16 pcix_cmd;
5875
5876 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5877 &pcix_cmd);
5878 pcix_cmd &= ~PCI_X_CMD_ERO;
5879 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5880 pcix_cmd);
5881 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005882
5883 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005884
5885 /* Chip reset on 5780 will reset MSI enable bit,
5886	 * so we need to restore it.
5887 */
5888 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5889 u16 ctrl;
5890
5891 pci_read_config_word(tp->pdev,
5892 tp->msi_cap + PCI_MSI_FLAGS,
5893 &ctrl);
5894 pci_write_config_word(tp->pdev,
5895 tp->msi_cap + PCI_MSI_FLAGS,
5896 ctrl | PCI_MSI_FLAGS_ENABLE);
5897 val = tr32(MSGINT_MODE);
5898 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5899 }
5900 }
5901}
5902
Linus Torvalds1da177e2005-04-16 15:20:36 -07005903static void tg3_stop_fw(struct tg3 *);
5904
5905/* tp->lock is held. */
5906static int tg3_chip_reset(struct tg3 *tp)
5907{
5908 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005909 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005910 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005911
David S. Millerf49639e2006-06-09 11:58:36 -07005912 tg3_nvram_lock(tp);
5913
Matt Carlson158d7ab2008-05-29 01:37:54 -07005914 tg3_mdio_stop(tp);
5915
Matt Carlson77b483f2008-08-15 14:07:24 -07005916 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5917
David S. Millerf49639e2006-06-09 11:58:36 -07005918 /* No matching tg3_nvram_unlock() after this because
5919 * chip reset below will undo the nvram lock.
5920 */
5921 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005922
Michael Chanee6a99b2007-07-18 21:49:10 -07005923 /* GRC_MISC_CFG core clock reset will clear the memory
5924 * enable bit in PCI register 4 and the MSI enable bit
5925 * on some chips, so we save relevant registers here.
5926 */
5927 tg3_save_pci_state(tp);
5928
Michael Chand9ab5ad2006-03-20 22:27:35 -08005929 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005935 tw32(GRC_FASTBOOT_PC, 0);
5936
Linus Torvalds1da177e2005-04-16 15:20:36 -07005937 /*
5938 * We must avoid the readl() that normally takes place.
5939 * It locks machines, causes machine checks, and other
5940 * fun things. So, temporarily disable the 5701
5941 * hardware workaround, while we do the reset.
5942 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005943 write_op = tp->write32;
5944 if (write_op == tg3_write_flush_reg32)
5945 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005946
Michael Chand18edcb2007-03-24 20:57:11 -07005947 /* Prevent the irq handler from reading or writing PCI registers
5948 * during chip reset when the memory enable bit in the PCI command
5949	 * register may be cleared. The chip does not generate interrupts
5950 * at this time, but the irq handler may still be called due to irq
5951 * sharing or irqpoll.
5952 */
5953 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005954 if (tp->hw_status) {
5955 tp->hw_status->status = 0;
5956 tp->hw_status->status_tag = 0;
5957 }
Michael Chand18edcb2007-03-24 20:57:11 -07005958 tp->last_tag = 0;
5959 smp_mb();
5960 synchronize_irq(tp->pdev->irq);
5961
Linus Torvalds1da177e2005-04-16 15:20:36 -07005962 /* do the reset */
5963 val = GRC_MISC_CFG_CORECLK_RESET;
5964
5965 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5966 if (tr32(0x7e2c) == 0x60) {
5967 tw32(0x7e2c, 0x20);
5968 }
5969 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5970 tw32(GRC_MISC_CFG, (1 << 29));
5971 val |= (1 << 29);
5972 }
5973 }
5974
Michael Chanb5d37722006-09-27 16:06:21 -07005975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5976 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5977 tw32(GRC_VCPU_EXT_CTRL,
5978 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5979 }
5980
Linus Torvalds1da177e2005-04-16 15:20:36 -07005981 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5982 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5983 tw32(GRC_MISC_CFG, val);
5984
Michael Chan1ee582d2005-08-09 20:16:46 -07005985 /* restore 5701 hardware bug workaround write method */
5986 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005987
5988 /* Unfortunately, we have to delay before the PCI read back.
5989 * Some 575X chips even will not respond to a PCI cfg access
5990 * when the reset command is given to the chip.
5991 *
5992 * How do these hardware designers expect things to work
5993 * properly if the PCI write is posted for a long period
5994 * of time? It is always necessary to have some method by
5995 * which a register read back can occur to push the write
5996	 * which a register read back can occur to push out the
5997	 * write that performs the reset.
5998 * For most tg3 variants the trick below was working.
5999 * Ho hum...
6000 */
6001 udelay(120);
6002
6003 /* Flush PCI posted writes. The normal MMIO registers
6004 * are inaccessible at this time so this is the only
6005	 * way to do this reliably (actually, this is no longer
6006 * the case, see above). I tried to use indirect
6007 * register read/write but this upset some 5701 variants.
6008 */
6009 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6010
6011 udelay(120);
6012
6013 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6014 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6015 int i;
6016 u32 cfg_val;
6017
6018 /* Wait for link training to complete. */
6019 for (i = 0; i < 5000; i++)
6020 udelay(100);
6021
6022 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6023 pci_write_config_dword(tp->pdev, 0xc4,
6024 cfg_val | (1 << 15));
6025 }
Matt Carlsonfcb389d2008-11-03 16:55:44 -08006026 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
6027 /* Set PCIE max payload size and clear error status. */
6028 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006029 }
6030
Michael Chanee6a99b2007-07-18 21:49:10 -07006031 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006032
Michael Chand18edcb2007-03-24 20:57:11 -07006033 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6034
Michael Chanee6a99b2007-07-18 21:49:10 -07006035 val = 0;
6036 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07006037 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07006038 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006039
6040 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6041 tg3_stop_fw(tp);
6042 tw32(0x5000, 0x400);
6043 }
6044
6045 tw32(GRC_MODE, tp->grc_mode);
6046
6047 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006048 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006049
6050 tw32(0xc4, val | (1 << 15));
6051 }
6052
6053 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6055 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6056 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6057 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6058 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6059 }
6060
6061 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6062 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6063 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07006064 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6065 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6066 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07006067 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6068 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6069 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6070 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6071 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072 } else
6073 tw32_f(MAC_MODE, 0);
6074 udelay(40);
6075
Matt Carlson158d7ab2008-05-29 01:37:54 -07006076 tg3_mdio_start(tp);
6077
Matt Carlson77b483f2008-08-15 14:07:24 -07006078 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6079
Michael Chan7a6f4362006-09-27 16:03:31 -07006080 err = tg3_poll_fw(tp);
6081 if (err)
6082 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006083
6084 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6085 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006086 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006087
6088 tw32(0x7c00, val | (1 << 25));
6089 }
6090
6091 /* Reprobe ASF enable state. */
6092 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6093 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6094 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6095 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6096 u32 nic_cfg;
6097
6098 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6099 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6100 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006101 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006102 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006103 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6104 }
6105 }
6106
6107 return 0;
6108}
6109
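/* Pause the on-chip management firmware ahead of a chip reset.  The
 * handshake below runs only when ASF is enabled and the APE is not
 * managing the device: wait for any previous driver event to be
 * acknowledged, write FWCMD_NICDRV_PAUSE_FW into the firmware command
 * mailbox, signal the firmware via tg3_generate_fw_event(), and then
 * wait for the RX CPU firmware to acknowledge this event as well.
 */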
6110/* tp->lock is held. */
6111static void tg3_stop_fw(struct tg3 *tp)
6112{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006113 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6114 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006115 /* Wait for RX cpu to ACK the previous event. */
6116 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006117
6118 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006119
6120 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006121
Matt Carlson7c5026a2008-05-02 16:49:29 -07006122 /* Wait for RX cpu to ACK this event. */
6123 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006124 }
6125}
6126
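/* Fully quiesce and reset the chip: pause the firmware, write the
 * pre-reset signature for this kind of shutdown, stop the hardware,
 * reset the chip, and finally write the legacy and post-reset
 * signatures, presumably so that bootcode/ASF firmware can tell what
 * kind of reset took place.
 */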
6127/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07006128static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006129{
6130 int err;
6131
6132 tg3_stop_fw(tp);
6133
Michael Chan944d9802005-05-29 14:57:48 -07006134 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006136 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006137 err = tg3_chip_reset(tp);
6138
Michael Chan944d9802005-05-29 14:57:48 -07006139 tg3_write_sig_legacy(tp, kind);
6140 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006141
6142 if (err)
6143 return err;
6144
6145 return 0;
6146}
6147
6148#define TG3_FW_RELEASE_MAJOR 0x0
6149#define TG3_FW_RELASE_MINOR 0x0
6150#define TG3_FW_RELEASE_FIX 0x0
6151#define TG3_FW_START_ADDR 0x08000000
6152#define TG3_FW_TEXT_ADDR 0x08000000
6153#define TG3_FW_TEXT_LEN 0x9c0
6154#define TG3_FW_RODATA_ADDR 0x080009c0
6155#define TG3_FW_RODATA_LEN 0x60
6156#define TG3_FW_DATA_ADDR 0x08000a40
6157#define TG3_FW_DATA_LEN 0x20
6158#define TG3_FW_SBSS_ADDR 0x08000a60
6159#define TG3_FW_SBSS_LEN 0xc
6160#define TG3_FW_BSS_ADDR 0x08000a70
6161#define TG3_FW_BSS_LEN 0x10
6162
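/* Firmware image for the 5701 A0 workaround, loaded by
 * tg3_load_5701_a0_firmware_fix() below.  The segment addresses and
 * lengths match the TG3_FW_* definitions above; the data segment
 * (tg3FwData) is compiled out further down because it is all zeros.
 */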
Andreas Mohr50da8592006-08-14 23:54:30 -07006163static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006164 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6165 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6166 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6167 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6168 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6169 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6170 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6171 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6172 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6173 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6174 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6175 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6176 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6177 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6178 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6179 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6180 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6181 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6182 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6183 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6184 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6185 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6186 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6187 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6188 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6189 0, 0, 0, 0, 0, 0,
6190 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6191 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6192 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6193 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6194 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6195 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6196 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6197 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6198 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6199 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6200 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6201 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6202 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6203 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6204 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6205 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6206 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6207 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6208 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6209 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6210 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6211 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6212 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6213 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6214 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6215 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6216 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6217 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6218 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6219 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6220 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6221 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6222 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6223 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6224 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6225 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6226 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6227 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6228 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6229 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6230 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6231 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6232 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6233 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6234 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6235 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6236 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6237 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6238 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6239 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6240 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6241 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6242 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6243 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6244 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6245 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6246 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6247 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6248 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6249 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6250 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6251 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6252 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6253 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6254 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6255};
6256
Andreas Mohr50da8592006-08-14 23:54:30 -07006257static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006258 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6259 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6260 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6261 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6262 0x00000000
6263};
6264
6265#if 0 /* All zeros, don't eat up space with it. */
6266u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6267 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6268 0x00000000, 0x00000000, 0x00000000, 0x00000000
6269};
6270#endif
6271
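/* Scratch memory windows for the on-chip RX and TX processors.  Each
 * CPU has a 16 kB (0x4000 byte) region into which a firmware image is
 * copied before the CPU is released from halt.
 */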
6272#define RX_CPU_SCRATCH_BASE 0x30000
6273#define RX_CPU_SCRATCH_SIZE 0x04000
6274#define TX_CPU_SCRATCH_BASE 0x34000
6275#define TX_CPU_SCRATCH_SIZE 0x04000
6276
6277/* tp->lock is held. */
6278static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6279{
6280 int i;
6281
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006282 BUG_ON(offset == TX_CPU_BASE &&
6283 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006284
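	/* On the 5906 the embedded VCPU is halted through
	 * GRC_VCPU_EXT_CTRL rather than through the per-CPU
	 * CPU_MODE register used below.
	 */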
Michael Chanb5d37722006-09-27 16:06:21 -07006285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6286 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6287
6288 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6289 return 0;
6290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006291 if (offset == RX_CPU_BASE) {
6292 for (i = 0; i < 10000; i++) {
6293 tw32(offset + CPU_STATE, 0xffffffff);
6294 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6295 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6296 break;
6297 }
6298
6299 tw32(offset + CPU_STATE, 0xffffffff);
6300 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6301 udelay(10);
6302 } else {
6303 for (i = 0; i < 10000; i++) {
6304 tw32(offset + CPU_STATE, 0xffffffff);
6305 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6306 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6307 break;
6308 }
6309 }
6310
6311 if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
		       "%s CPU\n",
6314 tp->dev->name,
6315 (offset == RX_CPU_BASE ? "RX" : "TX"));
6316 return -ENODEV;
6317 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006318
6319 /* Clear firmware's nvram arbitration. */
6320 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6321 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006322 return 0;
6323}
6324
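/* Describes a firmware image to be copied into a CPU scratch area by
 * tg3_load_firmware_cpu().  Each segment (text, rodata, data) carries
 * its link-time base address, its length in bytes, and a pointer to
 * the image words; a NULL pointer means the segment is all zeros.
 *
 * A minimal usage sketch, mirroring tg3_load_5701_a0_firmware_fix()
 * below (designated initializers are used here purely for
 * illustration):
 *
 *	struct fw_info info = {
 *		.text_base   = TG3_FW_TEXT_ADDR,
 *		.text_len    = TG3_FW_TEXT_LEN,
 *		.text_data   = &tg3FwText[0],
 *		.rodata_base = TG3_FW_RODATA_ADDR,
 *		.rodata_len  = TG3_FW_RODATA_LEN,
 *		.rodata_data = &tg3FwRodata[0],
 *		.data_base   = TG3_FW_DATA_ADDR,
 *		.data_len    = TG3_FW_DATA_LEN,
 *		.data_data   = NULL,		-- all zeros
 *	};
 *	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
 *				    RX_CPU_SCRATCH_BASE,
 *				    RX_CPU_SCRATCH_SIZE, &info);
 */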
6325struct fw_info {
6326 unsigned int text_base;
6327 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006328 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006329 unsigned int rodata_base;
6330 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006331 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006332 unsigned int data_base;
6333 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006334 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006335};
6336
6337/* tp->lock is held. */
6338static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6339 int cpu_scratch_size, struct fw_info *info)
6340{
Michael Chanec41c7d2006-01-17 02:40:55 -08006341 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342 void (*write_op)(struct tg3 *, u32, u32);
6343
6344 if (cpu_base == TX_CPU_BASE &&
6345 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6346 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s, which is a 5705-class chip.\n",
6348 tp->dev->name);
6349 return -EINVAL;
6350 }
6351
6352 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6353 write_op = tg3_write_mem;
6354 else
6355 write_op = tg3_write_indirect_reg32;
6356
	/* It is possible that bootcode is still loading at this point,
	 * so get the NVRAM lock before halting the cpu.
	 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006360 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006362 if (!lock_err)
6363 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006364 if (err)
6365 goto out;
6366
6367 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6368 write_op(tp, cpu_scratch_base + i, 0);
6369 tw32(cpu_base + CPU_STATE, 0xffffffff);
6370 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
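	/* Copy each firmware segment into the CPU scratch window.  Only
	 * the low 16 bits of a segment's link-time base address are used
	 * as the offset within the scratch area, and a NULL data pointer
	 * causes the segment to be written as all zeros.
	 */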
6371 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6372 write_op(tp, (cpu_scratch_base +
6373 (info->text_base & 0xffff) +
6374 (i * sizeof(u32))),
6375 (info->text_data ?
6376 info->text_data[i] : 0));
6377 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6378 write_op(tp, (cpu_scratch_base +
6379 (info->rodata_base & 0xffff) +
6380 (i * sizeof(u32))),
6381 (info->rodata_data ?
6382 info->rodata_data[i] : 0));
6383 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6384 write_op(tp, (cpu_scratch_base +
6385 (info->data_base & 0xffff) +
6386 (i * sizeof(u32))),
6387 (info->data_data ?
6388 info->data_data[i] : 0));
6389
6390 err = 0;
6391
6392out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006393 return err;
6394}
6395
6396/* tp->lock is held. */
6397static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6398{
6399 struct fw_info info;
6400 int err, i;
6401
6402 info.text_base = TG3_FW_TEXT_ADDR;
6403 info.text_len = TG3_FW_TEXT_LEN;
6404 info.text_data = &tg3FwText[0];
6405 info.rodata_base = TG3_FW_RODATA_ADDR;
6406 info.rodata_len = TG3_FW_RODATA_LEN;
6407 info.rodata_data = &tg3FwRodata[0];
6408 info.data_base = TG3_FW_DATA_ADDR;
6409 info.data_len = TG3_FW_DATA_LEN;
6410 info.data_data = NULL;
6411
6412 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6413 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6414 &info);
6415 if (err)
6416 return err;
6417
6418 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6419 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6420 &info);
6421 if (err)
6422 return err;
6423
	/* Now start up only the RX cpu.  Write the firmware entry point
	 * to the program counter and verify that it took effect, retrying
	 * a few times if the first write does not stick.
	 */
6425 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6426 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6427
6428 for (i = 0; i < 5; i++) {
6429 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6430 break;
6431 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6432 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6433 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6434 udelay(1000);
6435 }
6436 if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
		       "to set RX CPU PC: it is %08x, should be %08x\n",
6439 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6440 TG3_FW_TEXT_ADDR);
6441 return -ENODEV;
6442 }
6443 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6444 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6445
6446 return 0;
6447}
6448
Linus Torvalds1da177e2005-04-16 15:20:36 -07006449
6450#define TG3_TSO_FW_RELEASE_MAJOR 0x1
6451#define TG3_TSO_FW_RELASE_MINOR 0x6
6452#define TG3_TSO_FW_RELEASE_FIX 0x0
6453#define TG3_TSO_FW_START_ADDR 0x08000000
6454#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6455#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6456#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6457#define TG3_TSO_FW_RODATA_LEN 0x60
6458#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6459#define TG3_TSO_FW_DATA_LEN 0x30
6460#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6461#define TG3_TSO_FW_SBSS_LEN 0x2c
6462#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6463#define TG3_TSO_FW_BSS_LEN 0x894
6464
Andreas Mohr50da8592006-08-14 23:54:30 -07006465static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006466 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6467 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6468 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6469 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6470 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6471 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6472 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6473 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6474 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6475 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6476 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6477 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6478 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6479 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6480 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6481 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6482 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6483 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6484 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6485 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6486 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6487 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6488 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6489 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6490 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6491 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6492 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6493 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6494 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6495 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6496 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6497 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6498 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6499 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6500 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6501 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6502 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6503 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6504 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6505 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6506 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6507 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6508 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6509 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6510 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6511 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6512 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6513 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6514 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6515 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6516 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6517 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6518 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6519 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6520 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6521 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6522 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6523 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6524 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6525 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6526 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6527 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6528 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6529 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6530 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6531 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6532 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6533 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6534 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6535 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6536 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6537 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6538 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6539 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6540 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6541 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6542 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6543 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6544 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6545 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6546 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6547 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6548 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6549 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6550 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6551 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6552 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6553 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6554 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6555 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6556 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6557 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6558 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6559 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6560 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6561 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6562 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6563 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6564 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6565 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6566 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6567 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6568 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6569 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6570 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6571 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6572 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6573 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6574 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6575 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6576 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6577 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6578 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6579 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6580 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6581 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6582 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6583 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6584 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6585 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6586 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6587 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6588 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6589 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6590 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6591 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6592 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6593 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6594 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6595 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6596 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6597 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6598 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6599 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6600 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6601 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6602 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6603 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6604 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6605 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6606 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6607 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6608 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6609 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6610 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6611 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6612 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6613 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6614 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6615 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6616 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6617 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6618 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6619 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6620 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6621 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6622 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6623 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6624 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6625 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6626 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6627 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6628 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6629 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6630 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6631 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6632 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6633 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6634 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6635 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6636 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6637 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6638 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6639 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6640 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6641 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6642 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6643 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6644 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6645 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6646 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6647 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6648 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6649 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6650 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6651 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6652 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6653 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6654 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6655 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6656 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6657 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6658 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6659 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6660 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6661 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6662 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6663 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6664 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6665 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6666 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6667 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6668 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6669 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6670 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6671 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6672 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6673 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6674 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6675 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6676 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6677 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6678 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6679 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6680 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6681 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6682 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6683 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6684 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6685 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6686 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6687 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6688 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6689 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6690 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6691 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6692 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6693 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6694 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6695 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6696 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6697 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6698 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6699 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6700 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6701 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6702 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6703 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6704 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6705 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6706 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6707 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6708 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6709 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6710 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6711 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6712 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6713 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6714 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6715 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6716 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6717 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6718 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6719 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6720 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6721 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6722 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6723 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6724 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6725 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6726 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6727 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6728 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6729 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6730 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6731 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6732 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6733 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6734 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6735 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6736 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6737 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6738 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6739 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6740 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6741 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6742 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6743 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6744 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6745 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6746 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6747 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6748 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6749 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6750};
6751
Andreas Mohr50da8592006-08-14 23:54:30 -07006752static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006753 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6754 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6755 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6756 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6757 0x00000000,
6758};
6759
Andreas Mohr50da8592006-08-14 23:54:30 -07006760static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6762 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6763 0x00000000,
6764};
6765
6766/* 5705 needs a special version of the TSO firmware. */
6767#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6768#define TG3_TSO5_FW_RELASE_MINOR 0x2
6769#define TG3_TSO5_FW_RELEASE_FIX 0x0
6770#define TG3_TSO5_FW_START_ADDR 0x00010000
6771#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6772#define TG3_TSO5_FW_TEXT_LEN 0xe90
6773#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6774#define TG3_TSO5_FW_RODATA_LEN 0x50
6775#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6776#define TG3_TSO5_FW_DATA_LEN 0x20
6777#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6778#define TG3_TSO5_FW_SBSS_LEN 0x28
6779#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6780#define TG3_TSO5_FW_BSS_LEN 0x88
6781
Andreas Mohr50da8592006-08-14 23:54:30 -07006782static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6784 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6785 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6786 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6787 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6788 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6789 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6790 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6791 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6792 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6793 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6794 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6795 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6796 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6797 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6798 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6799 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6800 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6801 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6802 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6803 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6804 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6805 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6806 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6807 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6808 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6809 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6810 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6811 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6812 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6813 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6814 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6815 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6816 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6817 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6818 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6819 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6820 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6821 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6822 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6823 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6824 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6825 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6826 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6827 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6828 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6829 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6830 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6831 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6832 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6833 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6834 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6835 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6836 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6837 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6838 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6839 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6840 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6841 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6842 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6843 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6844 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6845 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6846 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6847 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6848 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6849 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6850 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6851 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6852 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6853 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6854 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6855 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6856 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6857 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6858 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6859 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6860 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6861 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6862 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6863 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6864 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6865 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6866 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6867 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6868 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6869 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6870 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6871 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6872 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6873 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6874 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6875 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6876 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6877 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6878 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6879 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6880 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6881 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6882 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6883 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6884 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6885 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6886 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6887 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6888 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6889 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6890 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6891 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6892 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6893 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6894 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6895 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6896 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6897 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6898 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6899 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6900 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6901 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6902 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6903 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6904 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6905 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6906 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6907 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6908 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6909 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6910 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6911 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6912 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6913 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6914 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6915 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6916 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6917 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6918 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6919 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6920 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6921 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6922 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6923 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6924 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6925 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6926 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6927 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6928 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6929 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6930 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6931 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6932 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6933 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6934 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6935 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6936 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6937 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6938 0x00000000, 0x00000000, 0x00000000,
6939};
6940
Andreas Mohr50da8592006-08-14 23:54:30 -07006941static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006942 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6943 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6944 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6945 0x00000000, 0x00000000, 0x00000000,
6946};
6947
Andreas Mohr50da8592006-08-14 23:54:30 -07006948static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006949 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6950 0x00000000, 0x00000000, 0x00000000,
6951};
6952
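/* Load TSO firmware into the appropriate on-chip CPU.  Chips with
 * hardware TSO need no firmware at all.  The 5705 runs its TSO
 * firmware on the RX CPU and uses part of the NIC's mbuf pool as
 * scratch space (sized from the image segments); every other chip
 * that needs this loads the image into the TX CPU's dedicated
 * scratch window.
 */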
6953/* tp->lock is held. */
6954static int tg3_load_tso_firmware(struct tg3 *tp)
6955{
6956 struct fw_info info;
6957 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6958 int err, i;
6959
6960 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6961 return 0;
6962
6963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6964 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6965 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6966 info.text_data = &tg3Tso5FwText[0];
6967 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6968 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6969 info.rodata_data = &tg3Tso5FwRodata[0];
6970 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6971 info.data_len = TG3_TSO5_FW_DATA_LEN;
6972 info.data_data = &tg3Tso5FwData[0];
6973 cpu_base = RX_CPU_BASE;
6974 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6975 cpu_scratch_size = (info.text_len +
6976 info.rodata_len +
6977 info.data_len +
6978 TG3_TSO5_FW_SBSS_LEN +
6979 TG3_TSO5_FW_BSS_LEN);
6980 } else {
6981 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6982 info.text_len = TG3_TSO_FW_TEXT_LEN;
6983 info.text_data = &tg3TsoFwText[0];
6984 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6985 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6986 info.rodata_data = &tg3TsoFwRodata[0];
6987 info.data_base = TG3_TSO_FW_DATA_ADDR;
6988 info.data_len = TG3_TSO_FW_DATA_LEN;
6989 info.data_data = &tg3TsoFwData[0];
6990 cpu_base = TX_CPU_BASE;
6991 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6992 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6993 }
6994
6995 err = tg3_load_firmware_cpu(tp, cpu_base,
6996 cpu_scratch_base, cpu_scratch_size,
6997 &info);
6998 if (err)
6999 return err;
7000
	/* Now start up the cpu at the firmware entry point. */
7002 tw32(cpu_base + CPU_STATE, 0xffffffff);
7003 tw32_f(cpu_base + CPU_PC, info.text_base);
7004
7005 for (i = 0; i < 5; i++) {
7006 if (tr32(cpu_base + CPU_PC) == info.text_base)
7007 break;
7008 tw32(cpu_base + CPU_STATE, 0xffffffff);
7009 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7010 tw32_f(cpu_base + CPU_PC, info.text_base);
7011 udelay(1000);
7012 }
7013 if (i >= 5) {
 7014 		printk(KERN_ERR PFX "tg3_load_tso_firmware: %s failed to "
 7015 		       "set CPU PC, is %08x, should be %08x\n",
7016 tp->dev->name, tr32(cpu_base + CPU_PC),
7017 info.text_base);
7018 return -ENODEV;
7019 }
7020 tw32(cpu_base + CPU_STATE, 0xffffffff);
7021 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7022 return 0;
7023}
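/* Note on the start-up handshake above: once the image has been copied
 * into the selected CPU's scratch space, the program counter is pointed
 * at info.text_base and read back up to five times. Each failed pass
 * halts the CPU, rewrites the PC and waits 1 ms before retrying; only
 * after the PC reads back correctly is CPU_MODE cleared so the firmware
 * actually starts running.
 */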
7024
Linus Torvalds1da177e2005-04-16 15:20:36 -07007025
Linus Torvalds1da177e2005-04-16 15:20:36 -07007026static int tg3_set_mac_addr(struct net_device *dev, void *p)
7027{
7028 struct tg3 *tp = netdev_priv(dev);
7029 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07007030 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007031
Michael Chanf9804dd2005-09-27 12:13:10 -07007032 if (!is_valid_ether_addr(addr->sa_data))
7033 return -EINVAL;
7034
Linus Torvalds1da177e2005-04-16 15:20:36 -07007035 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7036
Michael Chane75f7c92006-03-20 21:33:26 -08007037 if (!netif_running(dev))
7038 return 0;
7039
Michael Chan58712ef2006-04-29 18:58:01 -07007040 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07007041 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07007042
Michael Chan986e0ae2007-05-05 12:10:20 -07007043 addr0_high = tr32(MAC_ADDR_0_HIGH);
7044 addr0_low = tr32(MAC_ADDR_0_LOW);
7045 addr1_high = tr32(MAC_ADDR_1_HIGH);
7046 addr1_low = tr32(MAC_ADDR_1_LOW);
7047
7048 /* Skip MAC addr 1 if ASF is using it. */
7049 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7050 !(addr1_high == 0 && addr1_low == 0))
7051 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07007052 }
Michael Chan986e0ae2007-05-05 12:10:20 -07007053 spin_lock_bh(&tp->lock);
7054 __tg3_set_mac_addr(tp, skip_mac_1);
7055 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007056
Michael Chanb9ec6c12006-07-25 16:37:27 -07007057 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007058}
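/* Note on the ASF case above: when the management firmware is active it
 * can claim MAC address register 1 for its own traffic. A non-zero
 * address 1 that differs from address 0 is treated as belonging to ASF,
 * so __tg3_set_mac_addr() is told (via skip_mac_1) to leave that
 * register alone while reprogramming the rest.
 */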
7059
7060/* tp->lock is held. */
7061static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7062 dma_addr_t mapping, u32 maxlen_flags,
7063 u32 nic_addr)
7064{
7065 tg3_write_mem(tp,
7066 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7067 ((u64) mapping >> 32));
7068 tg3_write_mem(tp,
7069 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7070 ((u64) mapping & 0xffffffff));
7071 tg3_write_mem(tp,
7072 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7073 maxlen_flags);
7074
7075 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7076 tg3_write_mem(tp,
7077 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7078 nic_addr);
7079}
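/* Each TG3_BDINFO block in NIC SRAM is a small ring-control record: the
 * 64-bit host DMA address of the ring (split into high/low words), a
 * packed maxlen/flags word, and, on chips without the 5705_PLUS flag,
 * the ring's own location inside NIC SRAM. The layout is described in
 * more detail in the comment ahead of the RCVDBDI programming in
 * tg3_reset_hw() below.
 */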
7080
7081static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07007082static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07007083{
7084 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7085 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7086 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7087 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7088 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7089 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7090 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7091 }
7092 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7093 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7094 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7095 u32 val = ec->stats_block_coalesce_usecs;
7096
7097 if (!netif_carrier_ok(tp->dev))
7098 val = 0;
7099
7100 tw32(HOSTCC_STAT_COAL_TICKS, val);
7101 }
7102}
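/* The ec fields written above are the standard struct ethtool_coalesce
 * knobs, so a command such as
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * would typically land in HOSTCC_RXCOL_TICKS and HOSTCC_RXMAX_FRAMES by
 * way of the driver's ethtool set_coalesce hook. On pre-5705-class
 * chips the statistics-block coalescing interval is forced to 0 while
 * the link is down.
 */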
Linus Torvalds1da177e2005-04-16 15:20:36 -07007103
7104/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007105static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007106{
7107 u32 val, rdmac_mode;
7108 int i, err, limit;
7109
7110 tg3_disable_ints(tp);
7111
7112 tg3_stop_fw(tp);
7113
7114 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7115
7116 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007117 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007118 }
7119
Matt Carlsondd477002008-05-25 23:45:58 -07007120 if (reset_phy &&
7121 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007122 tg3_phy_reset(tp);
7123
Linus Torvalds1da177e2005-04-16 15:20:36 -07007124 err = tg3_chip_reset(tp);
7125 if (err)
7126 return err;
7127
7128 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7129
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007130 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007131 val = tr32(TG3_CPMU_CTRL);
7132 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7133 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007134
7135 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7136 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7137 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7138 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7139
7140 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7141 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7142 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7143 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7144
7145 val = tr32(TG3_CPMU_HST_ACC);
7146 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7147 val |= CPMU_HST_ACC_MACCLK_6_25;
7148 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007149 }
7150
Linus Torvalds1da177e2005-04-16 15:20:36 -07007151 /* This works around an issue with Athlon chipsets on
7152 * B3 tigon3 silicon. This bit has no effect on any
7153 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007154 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007155 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007156 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7157 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7158 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7159 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007161
7162 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7163 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7164 val = tr32(TG3PCI_PCISTATE);
7165 val |= PCISTATE_RETRY_SAME_DMA;
7166 tw32(TG3PCI_PCISTATE, val);
7167 }
7168
Matt Carlson0d3031d2007-10-10 18:02:43 -07007169 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7170 /* Allow reads and writes to the
7171 * APE register and memory space.
7172 */
7173 val = tr32(TG3PCI_PCISTATE);
7174 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7175 PCISTATE_ALLOW_APE_SHMEM_WR;
7176 tw32(TG3PCI_PCISTATE, val);
7177 }
7178
Linus Torvalds1da177e2005-04-16 15:20:36 -07007179 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7180 /* Enable some hw fixes. */
7181 val = tr32(TG3PCI_MSI_DATA);
7182 val |= (1 << 26) | (1 << 28) | (1 << 29);
7183 tw32(TG3PCI_MSI_DATA, val);
7184 }
7185
7186 /* Descriptor ring init may make accesses to the
7187 * NIC SRAM area to setup the TX descriptors, so we
7188 * can only do this after the hardware has been
7189 * successfully reset.
7190 */
Michael Chan32d8c572006-07-25 16:38:29 -07007191 err = tg3_init_rings(tp);
7192 if (err)
7193 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007194
Matt Carlson9936bcf2007-10-10 18:03:07 -07007195 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlsonfcb389d2008-11-03 16:55:44 -08007196 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007197 /* This value is determined during the probe time DMA
7198 * engine test, tg3_test_dma.
7199 */
7200 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7201 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202
7203 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7204 GRC_MODE_4X_NIC_SEND_RINGS |
7205 GRC_MODE_NO_TX_PHDR_CSUM |
7206 GRC_MODE_NO_RX_PHDR_CSUM);
7207 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007208
7209 /* Pseudo-header checksum is done by hardware logic and not
 7210 	 * the offload processors, so make the chip do the pseudo-
7211 * header checksums on receive. For transmit it is more
7212 * convenient to do the pseudo-header checksum in software
7213 * as Linux does that on transmit for us in all cases.
7214 */
7215 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007216
7217 tw32(GRC_MODE,
7218 tp->grc_mode |
7219 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7220
 7221 	/* Set up the timer prescaler register. The clock is always 66 MHz. */
7222 val = tr32(GRC_MISC_CFG);
7223 val &= ~0xff;
7224 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7225 tw32(GRC_MISC_CFG, val);
7226
7227 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007228 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007229 /* Do nothing. */
7230 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7231 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7233 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7234 else
7235 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7236 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7237 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007239 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7240 int fw_len;
7241
7242 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7243 TG3_TSO5_FW_RODATA_LEN +
7244 TG3_TSO5_FW_DATA_LEN +
7245 TG3_TSO5_FW_SBSS_LEN +
7246 TG3_TSO5_FW_BSS_LEN);
7247 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7248 tw32(BUFMGR_MB_POOL_ADDR,
7249 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7250 tw32(BUFMGR_MB_POOL_SIZE,
7251 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7252 }
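		/* The arithmetic above, spelled out: fw_len is rounded up to
		 * the next 128-byte boundary ((x + 0x7f) & ~0x7f), the MBUF
		 * pool base is moved past the region reserved for the 5705
		 * TSO firmware image (which tg3_load_tso_firmware() loads at
		 * NIC_SRAM_MBUF_POOL_BASE5705), and the pool size is shrunk
		 * by that amount plus a further 0xa00 bytes.
		 */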
Linus Torvalds1da177e2005-04-16 15:20:36 -07007253
Michael Chan0f893dc2005-07-25 12:30:38 -07007254 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007255 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7256 tp->bufmgr_config.mbuf_read_dma_low_water);
7257 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7258 tp->bufmgr_config.mbuf_mac_rx_low_water);
7259 tw32(BUFMGR_MB_HIGH_WATER,
7260 tp->bufmgr_config.mbuf_high_water);
7261 } else {
7262 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7263 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7264 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7265 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7266 tw32(BUFMGR_MB_HIGH_WATER,
7267 tp->bufmgr_config.mbuf_high_water_jumbo);
7268 }
7269 tw32(BUFMGR_DMA_LOW_WATER,
7270 tp->bufmgr_config.dma_low_water);
7271 tw32(BUFMGR_DMA_HIGH_WATER,
7272 tp->bufmgr_config.dma_high_water);
7273
7274 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7275 for (i = 0; i < 2000; i++) {
7276 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7277 break;
7278 udelay(10);
7279 }
7280 if (i >= 2000) {
7281 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7282 tp->dev->name);
7283 return -ENODEV;
7284 }
7285
7286 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007287 val = tp->rx_pending / 8;
7288 if (val == 0)
7289 val = 1;
7290 else if (val > tp->rx_std_max_post)
7291 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007292 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7293 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7294 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7295
7296 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7297 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7298 }
Michael Chanf92905d2006-06-29 20:14:29 -07007299
7300 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007301
7302 /* Initialize TG3_BDINFO's at:
7303 * RCVDBDI_STD_BD: standard eth size rx ring
7304 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7305 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7306 *
7307 * like so:
7308 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7309 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7310 * ring attribute flags
7311 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7312 *
7313 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7314 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7315 *
7316 * The size of each ring is fixed in the firmware, but the location is
7317 * configurable.
7318 */
7319 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7320 ((u64) tp->rx_std_mapping >> 32));
7321 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7322 ((u64) tp->rx_std_mapping & 0xffffffff));
7323 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7324 NIC_SRAM_RX_BUFFER_DESC);
7325
7326 /* Don't even try to program the JUMBO/MINI buffer descriptor
7327 * configs on 5705.
7328 */
7329 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7330 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7331 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7332 } else {
7333 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7334 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7335
7336 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7337 BDINFO_FLAGS_DISABLED);
7338
7339 /* Setup replenish threshold. */
7340 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7341
Michael Chan0f893dc2005-07-25 12:30:38 -07007342 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007343 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7344 ((u64) tp->rx_jumbo_mapping >> 32));
7345 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7346 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7347 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7348 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7349 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7350 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7351 } else {
7352 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7353 BDINFO_FLAGS_DISABLED);
7354 }
7355
7356 }
7357
7358 /* There is only one send ring on 5705/5750, no need to explicitly
7359 * disable the others.
7360 */
7361 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7362 /* Clear out send RCB ring in SRAM. */
7363 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7364 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7365 BDINFO_FLAGS_DISABLED);
7366 }
7367
7368 tp->tx_prod = 0;
7369 tp->tx_cons = 0;
7370 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7371 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7372
7373 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7374 tp->tx_desc_mapping,
7375 (TG3_TX_RING_SIZE <<
7376 BDINFO_FLAGS_MAXLEN_SHIFT),
7377 NIC_SRAM_TX_BUFFER_DESC);
7378
7379 /* There is only one receive return ring on 5705/5750, no need
7380 * to explicitly disable the others.
7381 */
7382 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7383 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7384 i += TG3_BDINFO_SIZE) {
7385 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7386 BDINFO_FLAGS_DISABLED);
7387 }
7388 }
7389
7390 tp->rx_rcb_ptr = 0;
7391 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7392
7393 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7394 tp->rx_rcb_mapping,
7395 (TG3_RX_RCB_RING_SIZE(tp) <<
7396 BDINFO_FLAGS_MAXLEN_SHIFT),
7397 0);
7398
7399 tp->rx_std_ptr = tp->rx_pending;
7400 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7401 tp->rx_std_ptr);
7402
Michael Chan0f893dc2005-07-25 12:30:38 -07007403 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007404 tp->rx_jumbo_pending : 0;
7405 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7406 tp->rx_jumbo_ptr);
7407
7408 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007409 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007410
7411 /* MTU + ethernet header + FCS + optional VLAN tag */
7412 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7413
7414 /* The slot time is changed by tg3_setup_phy if we
7415 * run at gigabit with half duplex.
7416 */
7417 tw32(MAC_TX_LENGTHS,
7418 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7419 (6 << TX_LENGTHS_IPG_SHIFT) |
7420 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7421
7422 /* Receive rules. */
7423 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7424 tw32(RCVLPC_CONFIG, 0x0181);
7425
 7426 	/* Calculate the RDMAC_MODE setting early; we need it to determine
 7427 	 * the RCVLPC_STATS_ENABLE mask.
7428 */
7429 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7430 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7431 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7432 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7433 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007434
Matt Carlson57e69832008-05-25 23:48:31 -07007435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007437 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7438 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7439 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7440
Michael Chan85e94ce2005-04-21 17:05:28 -07007441 /* If statement applies to 5705 and 5750 PCI devices only */
7442 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7443 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7444 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007445 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007447 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7448 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7449 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7450 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7451 }
7452 }
7453
Michael Chan85e94ce2005-04-21 17:05:28 -07007454 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7455 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7456
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7458 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459
7460 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007461 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7462 val = tr32(RCVLPC_STATS_ENABLE);
7463 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7464 tw32(RCVLPC_STATS_ENABLE, val);
7465 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7466 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007467 val = tr32(RCVLPC_STATS_ENABLE);
7468 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7469 tw32(RCVLPC_STATS_ENABLE, val);
7470 } else {
7471 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7472 }
7473 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7474 tw32(SNDDATAI_STATSENAB, 0xffffff);
7475 tw32(SNDDATAI_STATSCTRL,
7476 (SNDDATAI_SCTRL_ENABLE |
7477 SNDDATAI_SCTRL_FASTUPD));
7478
7479 /* Setup host coalescing engine. */
7480 tw32(HOSTCC_MODE, 0);
7481 for (i = 0; i < 2000; i++) {
7482 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7483 break;
7484 udelay(10);
7485 }
7486
Michael Chand244c892005-07-05 14:42:33 -07007487 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007488
7489 /* set status block DMA address */
7490 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7491 ((u64) tp->status_mapping >> 32));
7492 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7493 ((u64) tp->status_mapping & 0xffffffff));
7494
7495 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7496 /* Status/statistics block address. See tg3_timer,
7497 * the tg3_periodic_fetch_stats call there, and
7498 * tg3_get_stats to see how this works for 5705/5750 chips.
7499 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007500 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7501 ((u64) tp->stats_mapping >> 32));
7502 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7503 ((u64) tp->stats_mapping & 0xffffffff));
7504 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7505 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7506 }
7507
7508 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7509
7510 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7511 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7512 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7513 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7514
7515 /* Clear statistics/status block in chip, and status block in ram. */
7516 for (i = NIC_SRAM_STATS_BLK;
7517 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7518 i += sizeof(u32)) {
7519 tg3_write_mem(tp, i, 0);
7520 udelay(40);
7521 }
7522 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7523
Michael Chanc94e3942005-09-27 12:12:42 -07007524 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7525 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7526 /* reset to prevent losing 1st rx packet intermittently */
7527 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7528 udelay(10);
7529 }
7530
Matt Carlson3bda1252008-08-15 14:08:22 -07007531 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7532 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7533 else
7534 tp->mac_mode = 0;
7535 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007536 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007537 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7538 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7539 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7540 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007541 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7542 udelay(40);
7543
Michael Chan314fba32005-04-21 17:07:04 -07007544 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007545 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007546 * register to preserve the GPIO settings for LOMs. The GPIOs,
7547 * whether used as inputs or outputs, are set by boot code after
7548 * reset.
7549 */
Michael Chan9d26e212006-12-07 00:21:14 -08007550 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007551 u32 gpio_mask;
7552
Michael Chan9d26e212006-12-07 00:21:14 -08007553 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7554 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7555 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007556
7557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7558 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7559 GRC_LCLCTRL_GPIO_OUTPUT3;
7560
Michael Chanaf36e6b2006-03-23 01:28:06 -08007561 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7562 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7563
Gary Zambranoaaf84462007-05-05 11:51:45 -07007564 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007565 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7566
7567 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007568 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7569 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7570 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007572 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7573 udelay(100);
7574
Michael Chan09ee9292005-08-09 20:17:00 -07007575 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007576 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007577
7578 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7579 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7580 udelay(40);
7581 }
7582
7583 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7584 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7585 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7586 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7587 WDMAC_MODE_LNGREAD_ENAB);
7588
Michael Chan85e94ce2005-04-21 17:05:28 -07007589 /* If statement applies to 5705 and 5750 PCI devices only */
7590 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7591 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7594 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7595 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7596 /* nothing */
7597 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7598 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7599 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7600 val |= WDMAC_MODE_RX_ACCEL;
7601 }
7602 }
7603
Michael Chand9ab5ad2006-03-20 22:27:35 -08007604 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007605 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007606 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007607 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007608 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7609 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007610 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007611
Linus Torvalds1da177e2005-04-16 15:20:36 -07007612 tw32_f(WDMAC_MODE, val);
7613 udelay(40);
7614
Matt Carlson9974a352007-10-07 23:27:28 -07007615 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7616 u16 pcix_cmd;
7617
7618 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7619 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007621 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7622 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007623 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007624 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7625 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007626 }
Matt Carlson9974a352007-10-07 23:27:28 -07007627 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7628 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007629 }
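	/* The PCI-X tweak above rewrites the PCI-X command word at
	 * tp->pcix_cap + PCI_X_CMD: on 5703 the maximum memory read byte
	 * count is forced to the 2K encoding, and on 5704 the maximum
	 * outstanding split transactions field is cleared as well.
	 */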
7630
7631 tw32_f(RDMAC_MODE, rdmac_mode);
7632 udelay(40);
7633
7634 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7635 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7636 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007637
7638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7639 tw32(SNDDATAC_MODE,
7640 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7641 else
7642 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7643
Linus Torvalds1da177e2005-04-16 15:20:36 -07007644 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7645 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7646 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7647 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007648 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7649 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007650 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7651 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7652
7653 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7654 err = tg3_load_5701_a0_firmware_fix(tp);
7655 if (err)
7656 return err;
7657 }
7658
Linus Torvalds1da177e2005-04-16 15:20:36 -07007659 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7660 err = tg3_load_tso_firmware(tp);
7661 if (err)
7662 return err;
7663 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007664
7665 tp->tx_mode = TX_MODE_ENABLE;
7666 tw32_f(MAC_TX_MODE, tp->tx_mode);
7667 udelay(100);
7668
7669 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7672 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007674 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7675
Linus Torvalds1da177e2005-04-16 15:20:36 -07007676 tw32_f(MAC_RX_MODE, tp->rx_mode);
7677 udelay(10);
7678
Linus Torvalds1da177e2005-04-16 15:20:36 -07007679 tw32(MAC_LED_CTRL, tp->led_ctrl);
7680
7681 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007682 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007683 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7684 udelay(10);
7685 }
7686 tw32_f(MAC_RX_MODE, tp->rx_mode);
7687 udelay(10);
7688
7689 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7690 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7691 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7692 /* Set drive transmission level to 1.2V */
7693 /* only if the signal pre-emphasis bit is not set */
7694 val = tr32(MAC_SERDES_CFG);
7695 val &= 0xfffff000;
7696 val |= 0x880;
7697 tw32(MAC_SERDES_CFG, val);
7698 }
7699 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7700 tw32(MAC_SERDES_CFG, 0x616000);
7701 }
7702
7703 /* Prevent chip from dropping frames when flow control
7704 * is enabled.
7705 */
7706 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7707
7708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7709 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7710 /* Use hardware link auto-negotiation */
7711 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7712 }
7713
Michael Chand4d2c552006-03-20 17:47:20 -08007714 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7715 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7716 u32 tmp;
7717
7718 tmp = tr32(SERDES_RX_CTRL);
7719 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7720 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7721 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7722 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7723 }
7724
Matt Carlsondd477002008-05-25 23:45:58 -07007725 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7726 if (tp->link_config.phy_is_low_power) {
7727 tp->link_config.phy_is_low_power = 0;
7728 tp->link_config.speed = tp->link_config.orig_speed;
7729 tp->link_config.duplex = tp->link_config.orig_duplex;
7730 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7731 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007732
Matt Carlsondd477002008-05-25 23:45:58 -07007733 err = tg3_setup_phy(tp, 0);
7734 if (err)
7735 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007736
Matt Carlsondd477002008-05-25 23:45:58 -07007737 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7738 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7739 u32 tmp;
7740
7741 /* Clear CRC stats. */
7742 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7743 tg3_writephy(tp, MII_TG3_TEST1,
7744 tmp | MII_TG3_TEST1_CRC_EN);
7745 tg3_readphy(tp, 0x14, &tmp);
7746 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007747 }
7748 }
7749
7750 __tg3_set_rx_mode(tp->dev);
7751
7752 /* Initialize receive rules. */
7753 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7754 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7755 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7756 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7757
Michael Chan4cf78e42005-07-25 12:29:19 -07007758 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007759 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007760 limit = 8;
7761 else
7762 limit = 16;
7763 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7764 limit -= 4;
7765 switch (limit) {
7766 case 16:
7767 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7768 case 15:
7769 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7770 case 14:
7771 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7772 case 13:
7773 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7774 case 12:
7775 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7776 case 11:
7777 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7778 case 10:
7779 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7780 case 9:
7781 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7782 case 8:
7783 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7784 case 7:
7785 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7786 case 6:
7787 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7788 case 5:
7789 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7790 case 4:
7791 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7792 case 3:
7793 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7794 case 2:
7795 case 1:
7796
7797 default:
7798 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007799 }
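	/* The switch above relies on deliberate fall-through: execution
	 * enters at the case matching 'limit' and drops through every
	 * following case, so receive rules limit-1 down to 4 (and their
	 * value registers) are zeroed in a single pass. Rules 3 and 2 are
	 * intentionally left commented out, and rules 0 and 1 keep the
	 * defaults programmed just above.
	 */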
Linus Torvalds1da177e2005-04-16 15:20:36 -07007800
Matt Carlson9ce768e2007-10-11 19:49:11 -07007801 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7802 /* Write our heartbeat update interval to APE. */
7803 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7804 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007805
Linus Torvalds1da177e2005-04-16 15:20:36 -07007806 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7807
Linus Torvalds1da177e2005-04-16 15:20:36 -07007808 return 0;
7809}
7810
7811/* Called at device open time to get the chip ready for
7812 * packet processing. Invoked with tp->lock held.
7813 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007814static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007815{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816 tg3_switch_clocks(tp);
7817
7818 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7819
Matt Carlson2f751b62008-08-04 23:17:34 -07007820 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007821}
7822
7823#define TG3_STAT_ADD32(PSTAT, REG) \
7824do { u32 __val = tr32(REG); \
7825 (PSTAT)->low += __val; \
7826 if ((PSTAT)->low < __val) \
7827 (PSTAT)->high += 1; \
7828} while (0)
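/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit software
 * counter kept as a high/low pair: the register value is added to ->low
 * and, if ->low wrapped (ended up smaller than the value just added), a
 * carry goes into ->high. Roughly the same as the 64-bit form below
 * (illustrative only):
 *
 *	u64 total = ((u64)(PSTAT)->high << 32) | (PSTAT)->low;
 *	total += tr32(REG);
 *	(PSTAT)->high = total >> 32;
 *	(PSTAT)->low  = (u32)total;
 */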
7829
7830static void tg3_periodic_fetch_stats(struct tg3 *tp)
7831{
7832 struct tg3_hw_stats *sp = tp->hw_stats;
7833
7834 if (!netif_carrier_ok(tp->dev))
7835 return;
7836
7837 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7838 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7839 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7840 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7841 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7842 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7843 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7844 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7845 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7846 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7847 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7848 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7849 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7850
7851 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7852 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7853 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7854 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7855 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7856 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7857 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7858 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7859 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7860 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7861 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7862 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7863 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7864 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007865
7866 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7867 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7868 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007869}
7870
7871static void tg3_timer(unsigned long __opaque)
7872{
7873 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007874
Michael Chanf475f162006-03-27 23:20:14 -08007875 if (tp->irq_sync)
7876 goto restart_timer;
7877
David S. Millerf47c11e2005-06-24 20:18:35 -07007878 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007879
David S. Millerfac9b832005-05-18 22:46:34 -07007880 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
 7881 		/* All of this garbage is needed because, when using non-tagged
 7882 		 * IRQ status, the mailbox/status_block protocol the chip
 7883 		 * uses with the CPU is race prone.
7884 */
7885 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7886 tw32(GRC_LOCAL_CTRL,
7887 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7888 } else {
7889 tw32(HOSTCC_MODE, tp->coalesce_mode |
7890 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7891 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007892
David S. Millerfac9b832005-05-18 22:46:34 -07007893 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7894 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007895 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007896 schedule_work(&tp->reset_task);
7897 return;
7898 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007899 }
7900
Linus Torvalds1da177e2005-04-16 15:20:36 -07007901 /* This part only runs once per second. */
7902 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007903 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7904 tg3_periodic_fetch_stats(tp);
7905
Linus Torvalds1da177e2005-04-16 15:20:36 -07007906 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7907 u32 mac_stat;
7908 int phy_event;
7909
7910 mac_stat = tr32(MAC_STATUS);
7911
7912 phy_event = 0;
7913 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7914 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7915 phy_event = 1;
7916 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7917 phy_event = 1;
7918
7919 if (phy_event)
7920 tg3_setup_phy(tp, 0);
7921 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7922 u32 mac_stat = tr32(MAC_STATUS);
7923 int need_setup = 0;
7924
7925 if (netif_carrier_ok(tp->dev) &&
7926 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7927 need_setup = 1;
7928 }
 7929 			if (!netif_carrier_ok(tp->dev) &&
7930 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7931 MAC_STATUS_SIGNAL_DET))) {
7932 need_setup = 1;
7933 }
7934 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007935 if (!tp->serdes_counter) {
7936 tw32_f(MAC_MODE,
7937 (tp->mac_mode &
7938 ~MAC_MODE_PORT_MODE_MASK));
7939 udelay(40);
7940 tw32_f(MAC_MODE, tp->mac_mode);
7941 udelay(40);
7942 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943 tg3_setup_phy(tp, 0);
7944 }
Michael Chan747e8f82005-07-25 12:33:22 -07007945 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7946 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947
7948 tp->timer_counter = tp->timer_multiplier;
7949 }
7950
Michael Chan130b8e42006-09-27 16:00:40 -07007951 /* Heartbeat is only sent once every 2 seconds.
7952 *
7953 * The heartbeat is to tell the ASF firmware that the host
7954 * driver is still alive. In the event that the OS crashes,
7955 * ASF needs to reset the hardware to free up the FIFO space
7956 * that may be filled with rx packets destined for the host.
7957 * If the FIFO is full, ASF will no longer function properly.
7958 *
 7959 	 * Unintended resets have been reported on real-time kernels
 7960 	 * where the timer doesn't run on time. Netpoll will also
 7961 	 * have the same problem.
7962 *
7963 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7964 * to check the ring condition when the heartbeat is expiring
7965 * before doing the reset. This will prevent most unintended
7966 * resets.
7967 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007968 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007969 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7970 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007971 tg3_wait_for_event_ack(tp);
7972
Michael Chanbbadf502006-04-06 21:46:34 -07007973 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007974 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007975 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007976 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007977 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007978
7979 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007980 }
7981 tp->asf_counter = tp->asf_multiplier;
7982 }
7983
David S. Millerf47c11e2005-06-24 20:18:35 -07007984 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007985
Michael Chanf475f162006-03-27 23:20:14 -08007986restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007987 tp->timer.expires = jiffies + tp->timer_offset;
7988 add_timer(&tp->timer);
7989}
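/* Timer cadence, for reference: tg3_timer() re-arms itself every
 * tp->timer_offset jiffies (HZ on tagged-status chips, HZ/10 otherwise,
 * see tg3_open()). timer_counter/timer_multiplier scale that back to
 * one pass per second for the stats fetch and link checks, and
 * asf_counter/asf_multiplier double the interval again so the ASF
 * heartbeat goes out roughly every 2 seconds, comfortably inside the
 * 5-second timeout advertised to the firmware above.
 */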
7990
Adrian Bunk81789ef2006-03-20 23:00:14 -08007991static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007992{
David Howells7d12e782006-10-05 14:55:46 +01007993 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007994 unsigned long flags;
7995 struct net_device *dev = tp->dev;
7996
7997 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7998 fn = tg3_msi;
7999 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8000 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008001 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008002 } else {
8003 fn = tg3_interrupt;
8004 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8005 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008006 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08008007 }
 8008 	return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
8009}
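/* Handler selection above, in short: MSI uses tg3_msi (or the 1-shot
 * variant on chips flagged TG3_FLG2_1SHOT_MSI) and does not need
 * IRQF_SHARED because the vector is exclusive to the device, while
 * legacy INTx uses tg3_interrupt or its tagged-status variant and is
 * registered shared. IRQF_SAMPLE_RANDOM merely lets the interrupt
 * timing feed the kernel entropy pool in either case.
 */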
8010
Michael Chan79381092005-04-21 17:13:59 -07008011static int tg3_test_interrupt(struct tg3 *tp)
8012{
8013 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07008014 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07008015
Michael Chand4bc3922005-05-29 14:59:20 -07008016 if (!netif_running(dev))
8017 return -ENODEV;
8018
Michael Chan79381092005-04-21 17:13:59 -07008019 tg3_disable_ints(tp);
8020
8021 free_irq(tp->pdev->irq, dev);
8022
8023 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07008024 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07008025 if (err)
8026 return err;
8027
Michael Chan38f38432005-09-05 17:53:32 -07008028 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07008029 tg3_enable_ints(tp);
8030
8031 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8032 HOSTCC_MODE_NOW);
8033
8034 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07008035 u32 int_mbox, misc_host_ctrl;
8036
Michael Chan09ee9292005-08-09 20:17:00 -07008037 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8038 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07008039 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8040
8041 if ((int_mbox != 0) ||
8042 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8043 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07008044 break;
Michael Chanb16250e2006-09-27 16:10:14 -07008045 }
8046
Michael Chan79381092005-04-21 17:13:59 -07008047 msleep(10);
8048 }
8049
8050 tg3_disable_ints(tp);
8051
8052 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008053
Michael Chanfcfa0a32006-03-20 22:28:41 -08008054 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008055
8056 if (err)
8057 return err;
8058
Michael Chanb16250e2006-09-27 16:10:14 -07008059 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07008060 return 0;
8061
8062 return -EIO;
8063}
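/* What the test above checks: with the dedicated tg3_test_isr handler
 * installed, HOSTCC_MODE_NOW asks the coalescing engine to raise an
 * interrupt immediately, and the loop then watches the interrupt
 * mailbox and the MISC_HOST_CTRL mask bit for up to ~50 ms (5 passes of
 * msleep(10)). Either indication is taken as proof that the interrupt
 * actually reached the host, which is what tg3_test_msi() relies on
 * below.
 */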
8064
 8065/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 8066 * INTx mode is successfully restored.
8067 */
8068static int tg3_test_msi(struct tg3 *tp)
8069{
8070 struct net_device *dev = tp->dev;
8071 int err;
8072 u16 pci_cmd;
8073
8074 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8075 return 0;
8076
8077 /* Turn off SERR reporting in case MSI terminates with Master
8078 * Abort.
8079 */
8080 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8081 pci_write_config_word(tp->pdev, PCI_COMMAND,
8082 pci_cmd & ~PCI_COMMAND_SERR);
8083
8084 err = tg3_test_interrupt(tp);
8085
8086 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8087
8088 if (!err)
8089 return 0;
8090
8091 /* other failures */
8092 if (err != -EIO)
8093 return err;
8094
8095 /* MSI test failed, go back to INTx mode */
8096 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8097 "switching to INTx mode. Please report this failure to "
8098 "the PCI maintainer and include system chipset information.\n",
8099 tp->dev->name);
8100
8101 free_irq(tp->pdev->irq, dev);
8102 pci_disable_msi(tp->pdev);
8103
8104 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8105
Michael Chanfcfa0a32006-03-20 22:28:41 -08008106 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008107 if (err)
8108 return err;
8109
8110 /* Need to reset the chip because the MSI cycle may have terminated
8111 * with Master Abort.
8112 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008113 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008114
Michael Chan944d9802005-05-29 14:57:48 -07008115 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008116 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008117
David S. Millerf47c11e2005-06-24 20:18:35 -07008118 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008119
8120 if (err)
8121 free_irq(tp->pdev->irq, dev);
8122
8123 return err;
8124}
8125
Linus Torvalds1da177e2005-04-16 15:20:36 -07008126static int tg3_open(struct net_device *dev)
8127{
8128 struct tg3 *tp = netdev_priv(dev);
8129 int err;
8130
Michael Chanc49a1562006-12-17 17:07:29 -08008131 netif_carrier_off(tp->dev);
8132
Michael Chanbc1c7562006-03-20 17:48:03 -08008133 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008134 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008135 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008136
8137 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008138
Linus Torvalds1da177e2005-04-16 15:20:36 -07008139 tg3_disable_ints(tp);
8140 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8141
David S. Millerf47c11e2005-06-24 20:18:35 -07008142 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008143
8144 /* The placement of this call is tied
8145 * to the setup and use of Host TX descriptors.
8146 */
8147 err = tg3_alloc_consistent(tp);
8148 if (err)
8149 return err;
8150
Michael Chan7544b092007-05-05 13:08:32 -07008151 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008152 /* All MSI supporting chips should support tagged
8153 * status. Assert that this is the case.
8154 */
8155 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8156 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8157 "Not using MSI.\n", tp->dev->name);
8158 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008159 u32 msi_mode;
8160
8161 msi_mode = tr32(MSGINT_MODE);
8162 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8163 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8164 }
8165 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008166 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008167
8168 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008169 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8170 pci_disable_msi(tp->pdev);
8171 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8172 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008173 tg3_free_consistent(tp);
8174 return err;
8175 }
8176
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008177 napi_enable(&tp->napi);
8178
David S. Millerf47c11e2005-06-24 20:18:35 -07008179 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008180
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008181 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008182 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008183 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008184 tg3_free_rings(tp);
8185 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008186 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8187 tp->timer_offset = HZ;
8188 else
8189 tp->timer_offset = HZ / 10;
8190
8191 BUG_ON(tp->timer_offset > HZ);
8192 tp->timer_counter = tp->timer_multiplier =
8193 (HZ / tp->timer_offset);
8194 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008195 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008196
8197 init_timer(&tp->timer);
8198 tp->timer.expires = jiffies + tp->timer_offset;
8199 tp->timer.data = (unsigned long) tp;
8200 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008201 }
8202
David S. Millerf47c11e2005-06-24 20:18:35 -07008203 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008204
8205 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008206 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008207 free_irq(tp->pdev->irq, dev);
8208 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8209 pci_disable_msi(tp->pdev);
8210 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8211 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008212 tg3_free_consistent(tp);
8213 return err;
8214 }
8215
Michael Chan79381092005-04-21 17:13:59 -07008216 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8217 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008218
Michael Chan79381092005-04-21 17:13:59 -07008219 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008220 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008221
8222 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8223 pci_disable_msi(tp->pdev);
8224 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8225 }
Michael Chan944d9802005-05-29 14:57:48 -07008226 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008227 tg3_free_rings(tp);
8228 tg3_free_consistent(tp);
8229
David S. Millerf47c11e2005-06-24 20:18:35 -07008230 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008231
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008232 napi_disable(&tp->napi);
8233
Michael Chan79381092005-04-21 17:13:59 -07008234 return err;
8235 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008236
8237 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8238 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008239 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008240
Michael Chanb5d37722006-09-27 16:06:21 -07008241 tw32(PCIE_TRANSACTION_CFG,
8242 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008243 }
8244 }
Michael Chan79381092005-04-21 17:13:59 -07008245 }
8246
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008247 tg3_phy_start(tp);
8248
David S. Millerf47c11e2005-06-24 20:18:35 -07008249 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008250
Michael Chan79381092005-04-21 17:13:59 -07008251 add_timer(&tp->timer);
8252 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008253 tg3_enable_ints(tp);
8254
David S. Millerf47c11e2005-06-24 20:18:35 -07008255 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008256
8257 netif_start_queue(dev);
8258
8259 return 0;
8260}
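/* tg3_open() in outline: power the chip to D0, allocate the consistent
 * ring/status/stats memory, optionally switch to MSI, request the IRQ,
 * bring the hardware up via tg3_init_hw(), verify MSI delivery with
 * tg3_test_msi() when MSI is in use (falling back to INTx on failure),
 * start the PHY, arm the periodic timer and finally enable interrupts
 * and the TX queue. The error paths unwind whatever had been set up to
 * that point.
 */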
8261
8262#if 0
8263/*static*/ void tg3_dump_state(struct tg3 *tp)
8264{
8265 u32 val32, val32_2, val32_3, val32_4, val32_5;
8266 u16 val16;
8267 int i;
8268
8269 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8270 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8271 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8272 val16, val32);
8273
8274 /* MAC block */
8275 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8276 tr32(MAC_MODE), tr32(MAC_STATUS));
8277 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8278 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8279 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8280 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8281 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8282 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8283
8284 /* Send data initiator control block */
8285 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8286 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8287 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8288 tr32(SNDDATAI_STATSCTRL));
8289
8290 /* Send data completion control block */
8291 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8292
8293 /* Send BD ring selector block */
8294 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8295 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8296
8297 /* Send BD initiator control block */
8298 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8299 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8300
8301 /* Send BD completion control block */
8302 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8303
8304 /* Receive list placement control block */
8305 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8306 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8307 printk(" RCVLPC_STATSCTRL[%08x]\n",
8308 tr32(RCVLPC_STATSCTRL));
8309
8310 /* Receive data and receive BD initiator control block */
8311 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8312 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8313
8314 /* Receive data completion control block */
8315 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8316 tr32(RCVDCC_MODE));
8317
8318 /* Receive BD initiator control block */
8319 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8320 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8321
8322 /* Receive BD completion control block */
8323 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8324 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8325
8326 /* Receive list selector control block */
8327 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8328 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8329
8330 /* Mbuf cluster free block */
8331 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8332 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8333
8334 /* Host coalescing control block */
8335 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8336 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8337 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8338 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8339 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8340 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8341 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8342 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8343 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8344 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8345 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8346 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8347
8348 /* Memory arbiter control block */
8349 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8350 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8351
8352 /* Buffer manager control block */
8353 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8354 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8355 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8356 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8357 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8358 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8359 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8360 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8361
8362 /* Read DMA control block */
8363 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8364 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8365
8366 /* Write DMA control block */
8367 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8368 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8369
8370 /* DMA completion block */
8371 printk("DEBUG: DMAC_MODE[%08x]\n",
8372 tr32(DMAC_MODE));
8373
8374 /* GRC block */
8375 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8376 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8377 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8378 tr32(GRC_LOCAL_CTRL));
8379
8380 /* TG3_BDINFOs */
8381 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8382 tr32(RCVDBDI_JUMBO_BD + 0x0),
8383 tr32(RCVDBDI_JUMBO_BD + 0x4),
8384 tr32(RCVDBDI_JUMBO_BD + 0x8),
8385 tr32(RCVDBDI_JUMBO_BD + 0xc));
8386 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8387 tr32(RCVDBDI_STD_BD + 0x0),
8388 tr32(RCVDBDI_STD_BD + 0x4),
8389 tr32(RCVDBDI_STD_BD + 0x8),
8390 tr32(RCVDBDI_STD_BD + 0xc));
8391 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8392 tr32(RCVDBDI_MINI_BD + 0x0),
8393 tr32(RCVDBDI_MINI_BD + 0x4),
8394 tr32(RCVDBDI_MINI_BD + 0x8),
8395 tr32(RCVDBDI_MINI_BD + 0xc));
8396
8397 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8398 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8399 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8400 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8401 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8402 val32, val32_2, val32_3, val32_4);
8403
8404 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8405 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8406 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8407 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8408 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8409 val32, val32_2, val32_3, val32_4);
8410
8411 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8412 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8413 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8414 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8415 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8416 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8417 val32, val32_2, val32_3, val32_4, val32_5);
8418
8419 /* SW status block */
8420 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8421 tp->hw_status->status,
8422 tp->hw_status->status_tag,
8423 tp->hw_status->rx_jumbo_consumer,
8424 tp->hw_status->rx_consumer,
8425 tp->hw_status->rx_mini_consumer,
8426 tp->hw_status->idx[0].rx_producer,
8427 tp->hw_status->idx[0].tx_consumer);
8428
8429 /* SW statistics block */
8430 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8431 ((u32 *)tp->hw_stats)[0],
8432 ((u32 *)tp->hw_stats)[1],
8433 ((u32 *)tp->hw_stats)[2],
8434 ((u32 *)tp->hw_stats)[3]);
8435
8436 /* Mailboxes */
8437 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008438 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8439 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8440 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8441 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008442
8443 /* NIC side send descriptors. */
8444 for (i = 0; i < 6; i++) {
8445 unsigned long txd;
8446
8447 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8448 + (i * sizeof(struct tg3_tx_buffer_desc));
8449 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8450 i,
8451 readl(txd + 0x0), readl(txd + 0x4),
8452 readl(txd + 0x8), readl(txd + 0xc));
8453 }
8454
8455 /* NIC side RX descriptors. */
8456 for (i = 0; i < 6; i++) {
8457 unsigned long rxd;
8458
8459 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8460 + (i * sizeof(struct tg3_rx_buffer_desc));
8461 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8462 i,
8463 readl(rxd + 0x0), readl(rxd + 0x4),
8464 readl(rxd + 0x8), readl(rxd + 0xc));
8465 rxd += (4 * sizeof(u32));
8466 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8467 i,
8468 readl(rxd + 0x0), readl(rxd + 0x4),
8469 readl(rxd + 0x8), readl(rxd + 0xc));
8470 }
8471
8472 for (i = 0; i < 6; i++) {
8473 unsigned long rxd;
8474
8475 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8476 + (i * sizeof(struct tg3_rx_buffer_desc));
8477 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8478 i,
8479 readl(rxd + 0x0), readl(rxd + 0x4),
8480 readl(rxd + 0x8), readl(rxd + 0xc));
8481 rxd += (4 * sizeof(u32));
8482 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8483 i,
8484 readl(rxd + 0x0), readl(rxd + 0x4),
8485 readl(rxd + 0x8), readl(rxd + 0xc));
8486 }
8487}
8488#endif
8489
8490static struct net_device_stats *tg3_get_stats(struct net_device *);
8491static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8492
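/* Device close handler: quiesce NAPI and the reset task, stop the timer,
 * halt the chip, free rings, IRQ and DMA memory, snapshot the final
 * statistics and put the device into D3hot.
 */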
8493static int tg3_close(struct net_device *dev)
8494{
8495 struct tg3 *tp = netdev_priv(dev);
8496
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008497 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008498 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008499
Linus Torvalds1da177e2005-04-16 15:20:36 -07008500 netif_stop_queue(dev);
8501
8502 del_timer_sync(&tp->timer);
8503
David S. Millerf47c11e2005-06-24 20:18:35 -07008504 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008505#if 0
8506 tg3_dump_state(tp);
8507#endif
8508
8509 tg3_disable_ints(tp);
8510
Michael Chan944d9802005-05-29 14:57:48 -07008511 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008512 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008513 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008514
David S. Millerf47c11e2005-06-24 20:18:35 -07008515 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008516
Michael Chan88b06bc2005-04-21 17:13:25 -07008517 free_irq(tp->pdev->irq, dev);
8518 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8519 pci_disable_msi(tp->pdev);
8520 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8521 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008522
8523 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8524 sizeof(tp->net_stats_prev));
8525 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8526 sizeof(tp->estats_prev));
8527
8528 tg3_free_consistent(tp);
8529
Michael Chanbc1c7562006-03-20 17:48:03 -08008530 tg3_set_power_state(tp, PCI_D3hot);
8531
8532 netif_carrier_off(tp->dev);
8533
Linus Torvalds1da177e2005-04-16 15:20:36 -07008534 return 0;
8535}
8536
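/* Fold a 64-bit hardware counter into an unsigned long; 32-bit hosts
 * only see the low word.
 */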
8537static inline unsigned long get_stat64(tg3_stat64_t *val)
8538{
8539 unsigned long ret;
8540
8541#if (BITS_PER_LONG == 32)
8542 ret = val->low;
8543#else
8544 ret = ((u64)val->high << 32) | ((u64)val->low);
8545#endif
8546 return ret;
8547}
8548
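/* Unlike get_stat64(), always return the full 64-bit counter value. */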
Stefan Buehler816f8b82008-08-15 14:10:54 -07008549static inline u64 get_estat64(tg3_stat64_t *val)
8550{
8551 return ((u64)val->high << 32) | ((u64)val->low);
8552}
8553
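/* On 5700/5701 with a copper PHY the CRC error count lives in the PHY
 * (enabled via MII_TG3_TEST1, read from register 0x14); all other
 * configurations use the rx_fcs_errors hardware statistic.
 */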
Linus Torvalds1da177e2005-04-16 15:20:36 -07008554static unsigned long calc_crc_errors(struct tg3 *tp)
8555{
8556 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8557
8558 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8559 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8560 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008561 u32 val;
8562
David S. Millerf47c11e2005-06-24 20:18:35 -07008563 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008564 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8565 tg3_writephy(tp, MII_TG3_TEST1,
8566 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008567 tg3_readphy(tp, 0x14, &val);
8568 } else
8569 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008570 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008571
8572 tp->phy_crc_errors += val;
8573
8574 return tp->phy_crc_errors;
8575 }
8576
8577 return get_stat64(&hw_stats->rx_fcs_errors);
8578}
8579
8580#define ESTAT_ADD(member) \
8581 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008582 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008583
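/* Accumulate the live hardware statistics block on top of the counters
 * saved at the last close, so ethtool totals survive an ifdown/ifup.
 */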
8584static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8585{
8586 struct tg3_ethtool_stats *estats = &tp->estats;
8587 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8588 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8589
8590 if (!hw_stats)
8591 return old_estats;
8592
8593 ESTAT_ADD(rx_octets);
8594 ESTAT_ADD(rx_fragments);
8595 ESTAT_ADD(rx_ucast_packets);
8596 ESTAT_ADD(rx_mcast_packets);
8597 ESTAT_ADD(rx_bcast_packets);
8598 ESTAT_ADD(rx_fcs_errors);
8599 ESTAT_ADD(rx_align_errors);
8600 ESTAT_ADD(rx_xon_pause_rcvd);
8601 ESTAT_ADD(rx_xoff_pause_rcvd);
8602 ESTAT_ADD(rx_mac_ctrl_rcvd);
8603 ESTAT_ADD(rx_xoff_entered);
8604 ESTAT_ADD(rx_frame_too_long_errors);
8605 ESTAT_ADD(rx_jabbers);
8606 ESTAT_ADD(rx_undersize_packets);
8607 ESTAT_ADD(rx_in_length_errors);
8608 ESTAT_ADD(rx_out_length_errors);
8609 ESTAT_ADD(rx_64_or_less_octet_packets);
8610 ESTAT_ADD(rx_65_to_127_octet_packets);
8611 ESTAT_ADD(rx_128_to_255_octet_packets);
8612 ESTAT_ADD(rx_256_to_511_octet_packets);
8613 ESTAT_ADD(rx_512_to_1023_octet_packets);
8614 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8615 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8616 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8617 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8618 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8619
8620 ESTAT_ADD(tx_octets);
8621 ESTAT_ADD(tx_collisions);
8622 ESTAT_ADD(tx_xon_sent);
8623 ESTAT_ADD(tx_xoff_sent);
8624 ESTAT_ADD(tx_flow_control);
8625 ESTAT_ADD(tx_mac_errors);
8626 ESTAT_ADD(tx_single_collisions);
8627 ESTAT_ADD(tx_mult_collisions);
8628 ESTAT_ADD(tx_deferred);
8629 ESTAT_ADD(tx_excessive_collisions);
8630 ESTAT_ADD(tx_late_collisions);
8631 ESTAT_ADD(tx_collide_2times);
8632 ESTAT_ADD(tx_collide_3times);
8633 ESTAT_ADD(tx_collide_4times);
8634 ESTAT_ADD(tx_collide_5times);
8635 ESTAT_ADD(tx_collide_6times);
8636 ESTAT_ADD(tx_collide_7times);
8637 ESTAT_ADD(tx_collide_8times);
8638 ESTAT_ADD(tx_collide_9times);
8639 ESTAT_ADD(tx_collide_10times);
8640 ESTAT_ADD(tx_collide_11times);
8641 ESTAT_ADD(tx_collide_12times);
8642 ESTAT_ADD(tx_collide_13times);
8643 ESTAT_ADD(tx_collide_14times);
8644 ESTAT_ADD(tx_collide_15times);
8645 ESTAT_ADD(tx_ucast_packets);
8646 ESTAT_ADD(tx_mcast_packets);
8647 ESTAT_ADD(tx_bcast_packets);
8648 ESTAT_ADD(tx_carrier_sense_errors);
8649 ESTAT_ADD(tx_discards);
8650 ESTAT_ADD(tx_errors);
8651
8652 ESTAT_ADD(dma_writeq_full);
8653 ESTAT_ADD(dma_write_prioq_full);
8654 ESTAT_ADD(rxbds_empty);
8655 ESTAT_ADD(rx_discards);
8656 ESTAT_ADD(rx_errors);
8657 ESTAT_ADD(rx_threshold_hit);
8658
8659 ESTAT_ADD(dma_readq_full);
8660 ESTAT_ADD(dma_read_prioq_full);
8661 ESTAT_ADD(tx_comp_queue_full);
8662
8663 ESTAT_ADD(ring_set_send_prod_index);
8664 ESTAT_ADD(ring_status_update);
8665 ESTAT_ADD(nic_irqs);
8666 ESTAT_ADD(nic_avoided_irqs);
8667 ESTAT_ADD(nic_tx_threshold_hit);
8668
8669 return estats;
8670}
8671
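/* Map the MAC statistics block onto struct net_device_stats, again
 * adding the totals saved before the last chip shutdown.
 */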
8672static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8673{
8674 struct tg3 *tp = netdev_priv(dev);
8675 struct net_device_stats *stats = &tp->net_stats;
8676 struct net_device_stats *old_stats = &tp->net_stats_prev;
8677 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8678
8679 if (!hw_stats)
8680 return old_stats;
8681
8682 stats->rx_packets = old_stats->rx_packets +
8683 get_stat64(&hw_stats->rx_ucast_packets) +
8684 get_stat64(&hw_stats->rx_mcast_packets) +
8685 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008686
Linus Torvalds1da177e2005-04-16 15:20:36 -07008687 stats->tx_packets = old_stats->tx_packets +
8688 get_stat64(&hw_stats->tx_ucast_packets) +
8689 get_stat64(&hw_stats->tx_mcast_packets) +
8690 get_stat64(&hw_stats->tx_bcast_packets);
8691
8692 stats->rx_bytes = old_stats->rx_bytes +
8693 get_stat64(&hw_stats->rx_octets);
8694 stats->tx_bytes = old_stats->tx_bytes +
8695 get_stat64(&hw_stats->tx_octets);
8696
8697 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008698 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008699 stats->tx_errors = old_stats->tx_errors +
8700 get_stat64(&hw_stats->tx_errors) +
8701 get_stat64(&hw_stats->tx_mac_errors) +
8702 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8703 get_stat64(&hw_stats->tx_discards);
8704
8705 stats->multicast = old_stats->multicast +
8706 get_stat64(&hw_stats->rx_mcast_packets);
8707 stats->collisions = old_stats->collisions +
8708 get_stat64(&hw_stats->tx_collisions);
8709
8710 stats->rx_length_errors = old_stats->rx_length_errors +
8711 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8712 get_stat64(&hw_stats->rx_undersize_packets);
8713
8714 stats->rx_over_errors = old_stats->rx_over_errors +
8715 get_stat64(&hw_stats->rxbds_empty);
8716 stats->rx_frame_errors = old_stats->rx_frame_errors +
8717 get_stat64(&hw_stats->rx_align_errors);
8718 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8719 get_stat64(&hw_stats->tx_discards);
8720 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8721 get_stat64(&hw_stats->tx_carrier_sense_errors);
8722
8723 stats->rx_crc_errors = old_stats->rx_crc_errors +
8724 calc_crc_errors(tp);
8725
John W. Linville4f63b872005-09-12 14:43:18 -07008726 stats->rx_missed_errors = old_stats->rx_missed_errors +
8727 get_stat64(&hw_stats->rx_discards);
8728
Linus Torvalds1da177e2005-04-16 15:20:36 -07008729 return stats;
8730}
8731
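/* Bit-at-a-time CRC32 (reflected polynomial 0xedb88320), used for the
 * multicast hash filter below and for the NVRAM checksum self-test.
 */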
8732static inline u32 calc_crc(unsigned char *buf, int len)
8733{
8734 u32 reg;
8735 u32 tmp;
8736 int j, k;
8737
8738 reg = 0xffffffff;
8739
8740 for (j = 0; j < len; j++) {
8741 reg ^= buf[j];
8742
8743 for (k = 0; k < 8; k++) {
8744 tmp = reg & 0x01;
8745
8746 reg >>= 1;
8747
8748 if (tmp) {
8749 reg ^= 0xedb88320;
8750 }
8751 }
8752 }
8753
8754 return ~reg;
8755}
8756
8757static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8758{
8759 /* accept or reject all multicast frames */
8760 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8761 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8762 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8763 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8764}
8765
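/* Program promiscuous/all-multicast handling and the 128-bit multicast
 * hash filter: the low 7 bits of ~calc_crc(addr) select one bit in the
 * four MAC_HASH registers.  tg3_set_rx_mode() wraps this in
 * tg3_full_lock().
 */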
8766static void __tg3_set_rx_mode(struct net_device *dev)
8767{
8768 struct tg3 *tp = netdev_priv(dev);
8769 u32 rx_mode;
8770
8771 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8772 RX_MODE_KEEP_VLAN_TAG);
8773
8774 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8775 * flag clear.
8776 */
8777#if TG3_VLAN_TAG_USED
8778 if (!tp->vlgrp &&
8779 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8780 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8781#else
8782 /* By definition, VLAN is disabled always in this
8783 * case.
8784 */
8785 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8786 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8787#endif
8788
8789 if (dev->flags & IFF_PROMISC) {
8790 /* Promiscuous mode. */
8791 rx_mode |= RX_MODE_PROMISC;
8792 } else if (dev->flags & IFF_ALLMULTI) {
8793 /* Accept all multicast. */
8794 tg3_set_multi(tp, 1);
8795 } else if (dev->mc_count < 1) {
8796 /* Reject all multicast. */
8797 tg3_set_multi(tp, 0);
8798 } else {
8799 /* Accept one or more multicast(s). */
8800 struct dev_mc_list *mclist;
8801 unsigned int i;
8802 u32 mc_filter[4] = { 0, };
8803 u32 regidx;
8804 u32 bit;
8805 u32 crc;
8806
8807 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8808 i++, mclist = mclist->next) {
8809
8810 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8811 bit = ~crc & 0x7f;
8812 regidx = (bit & 0x60) >> 5;
8813 bit &= 0x1f;
8814 mc_filter[regidx] |= (1 << bit);
8815 }
8816
8817 tw32(MAC_HASH_REG_0, mc_filter[0]);
8818 tw32(MAC_HASH_REG_1, mc_filter[1]);
8819 tw32(MAC_HASH_REG_2, mc_filter[2]);
8820 tw32(MAC_HASH_REG_3, mc_filter[3]);
8821 }
8822
8823 if (rx_mode != tp->rx_mode) {
8824 tp->rx_mode = rx_mode;
8825 tw32_f(MAC_RX_MODE, rx_mode);
8826 udelay(10);
8827 }
8828}
8829
8830static void tg3_set_rx_mode(struct net_device *dev)
8831{
8832 struct tg3 *tp = netdev_priv(dev);
8833
Michael Chane75f7c92006-03-20 21:33:26 -08008834 if (!netif_running(dev))
8835 return;
8836
David S. Millerf47c11e2005-06-24 20:18:35 -07008837 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008838 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008839 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008840}
8841
8842#define TG3_REGDUMP_LEN (32 * 1024)
8843
8844static int tg3_get_regs_len(struct net_device *dev)
8845{
8846 return TG3_REGDUMP_LEN;
8847}
8848
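/* ethtool register dump: copy the interesting register ranges into a
 * 32K buffer at their native offsets, leaving the gaps zero-filled.
 * The dump is left zeroed while the PHY is in a low-power state.
 */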
8849static void tg3_get_regs(struct net_device *dev,
8850 struct ethtool_regs *regs, void *_p)
8851{
8852 u32 *p = _p;
8853 struct tg3 *tp = netdev_priv(dev);
8854 u8 *orig_p = _p;
8855 int i;
8856
8857 regs->version = 0;
8858
8859 memset(p, 0, TG3_REGDUMP_LEN);
8860
Michael Chanbc1c7562006-03-20 17:48:03 -08008861 if (tp->link_config.phy_is_low_power)
8862 return;
8863
David S. Millerf47c11e2005-06-24 20:18:35 -07008864 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008865
8866#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8867#define GET_REG32_LOOP(base,len) \
8868do { p = (u32 *)(orig_p + (base)); \
8869 for (i = 0; i < len; i += 4) \
8870 __GET_REG32((base) + i); \
8871} while (0)
8872#define GET_REG32_1(reg) \
8873do { p = (u32 *)(orig_p + (reg)); \
8874 __GET_REG32((reg)); \
8875} while (0)
8876
8877 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8878 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8879 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8880 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8881 GET_REG32_1(SNDDATAC_MODE);
8882 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8883 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8884 GET_REG32_1(SNDBDC_MODE);
8885 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8886 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8887 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8888 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8889 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8890 GET_REG32_1(RCVDCC_MODE);
8891 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8892 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8893 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8894 GET_REG32_1(MBFREE_MODE);
8895 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8896 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8897 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8898 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8899 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008900 GET_REG32_1(RX_CPU_MODE);
8901 GET_REG32_1(RX_CPU_STATE);
8902 GET_REG32_1(RX_CPU_PGMCTR);
8903 GET_REG32_1(RX_CPU_HWBKPT);
8904 GET_REG32_1(TX_CPU_MODE);
8905 GET_REG32_1(TX_CPU_STATE);
8906 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008907 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8908 GET_REG32_LOOP(FTQ_RESET, 0x120);
8909 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8910 GET_REG32_1(DMAC_MODE);
8911 GET_REG32_LOOP(GRC_MODE, 0x4c);
8912 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8913 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8914
8915#undef __GET_REG32
8916#undef GET_REG32_LOOP
8917#undef GET_REG32_1
8918
David S. Millerf47c11e2005-06-24 20:18:35 -07008919 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008920}
8921
8922static int tg3_get_eeprom_len(struct net_device *dev)
8923{
8924 struct tg3 *tp = netdev_priv(dev);
8925
8926 return tp->nvram_size;
8927}
8928
8929static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008930static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008931static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008932
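/* ethtool EEPROM read: NVRAM is fetched one 32-bit word at a time, so
 * unaligned head and tail bytes need special handling.
 */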
8933static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8934{
8935 struct tg3 *tp = netdev_priv(dev);
8936 int ret;
8937 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008938 u32 i, offset, len, b_offset, b_count;
8939 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008940
Michael Chanbc1c7562006-03-20 17:48:03 -08008941 if (tp->link_config.phy_is_low_power)
8942 return -EAGAIN;
8943
Linus Torvalds1da177e2005-04-16 15:20:36 -07008944 offset = eeprom->offset;
8945 len = eeprom->len;
8946 eeprom->len = 0;
8947
8948 eeprom->magic = TG3_EEPROM_MAGIC;
8949
8950 if (offset & 3) {
8951 /* adjustments to start on required 4 byte boundary */
8952 b_offset = offset & 3;
8953 b_count = 4 - b_offset;
8954 if (b_count > len) {
8955 /* i.e. offset=1 len=2 */
8956 b_count = len;
8957 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008958 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008959 if (ret)
8960 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008961 memcpy(data, ((char*)&val) + b_offset, b_count);
8962 len -= b_count;
8963 offset += b_count;
8964 eeprom->len += b_count;
8965 }
8966
8967 /* read bytes up to the last 4 byte boundary */
8968 pd = &data[eeprom->len];
8969 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008970 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008971 if (ret) {
8972 eeprom->len += i;
8973 return ret;
8974 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008975 memcpy(pd + i, &val, 4);
8976 }
8977 eeprom->len += i;
8978
8979 if (len & 3) {
8980 /* read last bytes not ending on 4 byte boundary */
8981 pd = &data[eeprom->len];
8982 b_count = len & 3;
8983 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008984 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008985 if (ret)
8986 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008987 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008988 eeprom->len += b_count;
8989 }
8990 return 0;
8991}
8992
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008993static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008994
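/* ethtool EEPROM write: round the request out to 32-bit boundaries and
 * preserve the neighbouring bytes with a read-modify-write through a
 * temporary buffer.
 */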
8995static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8996{
8997 struct tg3 *tp = netdev_priv(dev);
8998 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008999 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009000 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08009001 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009002
Michael Chanbc1c7562006-03-20 17:48:03 -08009003 if (tp->link_config.phy_is_low_power)
9004 return -EAGAIN;
9005
Linus Torvalds1da177e2005-04-16 15:20:36 -07009006 if (eeprom->magic != TG3_EEPROM_MAGIC)
9007 return -EINVAL;
9008
9009 offset = eeprom->offset;
9010 len = eeprom->len;
9011
9012 if ((b_offset = (offset & 3))) {
9013 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08009014 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009015 if (ret)
9016 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009017 len += b_offset;
9018 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07009019 if (len < 4)
9020 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009021 }
9022
9023 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07009024 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009025 /* adjustments to end on required 4 byte boundary */
9026 odd_len = 1;
9027 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08009028 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009029 if (ret)
9030 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009031 }
9032
9033 buf = data;
9034 if (b_offset || odd_len) {
9035 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009036 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009037 return -ENOMEM;
9038 if (b_offset)
9039 memcpy(buf, &start, 4);
9040 if (odd_len)
9041 memcpy(buf+len-4, &end, 4);
9042 memcpy(buf + b_offset, data, eeprom->len);
9043 }
9044
9045 ret = tg3_nvram_write_block(tp, offset, len, buf);
9046
9047 if (buf != data)
9048 kfree(buf);
9049
9050 return ret;
9051}
9052
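/* ethtool get_settings: forwarded to phylib when it manages the PHY,
 * otherwise answered from the driver's link_config state.
 */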
9053static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9054{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009055 struct tg3 *tp = netdev_priv(dev);
9056
9057 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9058 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9059 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009060 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009061 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009062
Linus Torvalds1da177e2005-04-16 15:20:36 -07009063 cmd->supported = (SUPPORTED_Autoneg);
9064
9065 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9066 cmd->supported |= (SUPPORTED_1000baseT_Half |
9067 SUPPORTED_1000baseT_Full);
9068
Karsten Keilef348142006-05-12 12:49:08 -07009069 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009070 cmd->supported |= (SUPPORTED_100baseT_Half |
9071 SUPPORTED_100baseT_Full |
9072 SUPPORTED_10baseT_Half |
9073 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08009074 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07009075 cmd->port = PORT_TP;
9076 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009077 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07009078 cmd->port = PORT_FIBRE;
9079 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009080
Linus Torvalds1da177e2005-04-16 15:20:36 -07009081 cmd->advertising = tp->link_config.advertising;
9082 if (netif_running(dev)) {
9083 cmd->speed = tp->link_config.active_speed;
9084 cmd->duplex = tp->link_config.active_duplex;
9085 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009086 cmd->phy_address = PHY_ADDR;
9087 cmd->transceiver = 0;
9088 cmd->autoneg = tp->link_config.autoneg;
9089 cmd->maxtxpkt = 0;
9090 cmd->maxrxpkt = 0;
9091 return 0;
9092}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009093
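/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * against the PHY type, update link_config and, if the interface is
 * running, reprogram the PHY via tg3_setup_phy().
 */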
Linus Torvalds1da177e2005-04-16 15:20:36 -07009094static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9095{
9096 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009097
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009098 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9099 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9100 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009101 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009102 }
9103
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009104 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009105 /* These are the only valid advertisement bits allowed. */
9106 if (cmd->autoneg == AUTONEG_ENABLE &&
9107 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9108 ADVERTISED_1000baseT_Full |
9109 ADVERTISED_Autoneg |
9110 ADVERTISED_FIBRE)))
9111 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009112 /* Fiber can only do SPEED_1000. */
9113 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9114 (cmd->speed != SPEED_1000))
9115 return -EINVAL;
9116 /* Copper cannot force SPEED_1000. */
9117 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9118 (cmd->speed == SPEED_1000))
9119 return -EINVAL;
9120 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009121 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009122 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009123
David S. Millerf47c11e2005-06-24 20:18:35 -07009124 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009125
9126 tp->link_config.autoneg = cmd->autoneg;
9127 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009128 tp->link_config.advertising = (cmd->advertising |
9129 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009130 tp->link_config.speed = SPEED_INVALID;
9131 tp->link_config.duplex = DUPLEX_INVALID;
9132 } else {
9133 tp->link_config.advertising = 0;
9134 tp->link_config.speed = cmd->speed;
9135 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009136 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009137
Michael Chan24fcad62006-12-17 17:06:46 -08009138 tp->link_config.orig_speed = tp->link_config.speed;
9139 tp->link_config.orig_duplex = tp->link_config.duplex;
9140 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9141
Linus Torvalds1da177e2005-04-16 15:20:36 -07009142 if (netif_running(dev))
9143 tg3_setup_phy(tp, 1);
9144
David S. Millerf47c11e2005-06-24 20:18:35 -07009145 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009146
Linus Torvalds1da177e2005-04-16 15:20:36 -07009147 return 0;
9148}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009149
Linus Torvalds1da177e2005-04-16 15:20:36 -07009150static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9151{
9152 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009153
Linus Torvalds1da177e2005-04-16 15:20:36 -07009154 strcpy(info->driver, DRV_MODULE_NAME);
9155 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009156 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009157 strcpy(info->bus_info, pci_name(tp->pdev));
9158}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009159
Linus Torvalds1da177e2005-04-16 15:20:36 -07009160static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9161{
9162 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009163
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009164 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9165 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009166 wol->supported = WAKE_MAGIC;
9167 else
9168 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009169 wol->wolopts = 0;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08009170 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9171 device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009172 wol->wolopts = WAKE_MAGIC;
9173 memset(&wol->sopass, 0, sizeof(wol->sopass));
9174}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009175
Linus Torvalds1da177e2005-04-16 15:20:36 -07009176static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9177{
9178 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009179 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009180
Linus Torvalds1da177e2005-04-16 15:20:36 -07009181 if (wol->wolopts & ~WAKE_MAGIC)
9182 return -EINVAL;
9183 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009184 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009185 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009186
David S. Millerf47c11e2005-06-24 20:18:35 -07009187 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009188 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009189 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009190 device_set_wakeup_enable(dp, true);
9191 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009192 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009193 device_set_wakeup_enable(dp, false);
9194 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009195 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009196
Linus Torvalds1da177e2005-04-16 15:20:36 -07009197 return 0;
9198}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009199
Linus Torvalds1da177e2005-04-16 15:20:36 -07009200static u32 tg3_get_msglevel(struct net_device *dev)
9201{
9202 struct tg3 *tp = netdev_priv(dev);
9203 return tp->msg_enable;
9204}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009205
Linus Torvalds1da177e2005-04-16 15:20:36 -07009206static void tg3_set_msglevel(struct net_device *dev, u32 value)
9207{
9208 struct tg3 *tp = netdev_priv(dev);
9209 tp->msg_enable = value;
9210}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009211
Linus Torvalds1da177e2005-04-16 15:20:36 -07009212static int tg3_set_tso(struct net_device *dev, u32 value)
9213{
9214 struct tg3 *tp = netdev_priv(dev);
9215
9216 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9217 if (value)
9218 return -EINVAL;
9219 return 0;
9220 }
Michael Chanb5d37722006-09-27 16:06:21 -07009221 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9222 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009223 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009224 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9226 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9227 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009229 dev->features |= NETIF_F_TSO_ECN;
9230 } else
9231 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009232 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009233 return ethtool_op_set_tso(dev, value);
9234}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009235
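/* Restart autonegotiation, through phylib when attached or by setting
 * BMCR_ANRESTART directly.  Not supported for SERDES PHYs.
 */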
Linus Torvalds1da177e2005-04-16 15:20:36 -07009236static int tg3_nway_reset(struct net_device *dev)
9237{
9238 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009239 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009240
Linus Torvalds1da177e2005-04-16 15:20:36 -07009241 if (!netif_running(dev))
9242 return -EAGAIN;
9243
Michael Chanc94e3942005-09-27 12:12:42 -07009244 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9245 return -EINVAL;
9246
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009247 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9248 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9249 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009250 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009251 } else {
9252 u32 bmcr;
9253
9254 spin_lock_bh(&tp->lock);
9255 r = -EINVAL;
9256 tg3_readphy(tp, MII_BMCR, &bmcr);
9257 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9258 ((bmcr & BMCR_ANENABLE) ||
9259 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9260 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9261 BMCR_ANENABLE);
9262 r = 0;
9263 }
9264 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009265 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009266
Linus Torvalds1da177e2005-04-16 15:20:36 -07009267 return r;
9268}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009269
Linus Torvalds1da177e2005-04-16 15:20:36 -07009270static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9271{
9272 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009273
Linus Torvalds1da177e2005-04-16 15:20:36 -07009274 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9275 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009276 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9277 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9278 else
9279 ering->rx_jumbo_max_pending = 0;
9280
9281 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009282
9283 ering->rx_pending = tp->rx_pending;
9284 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009285 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9286 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9287 else
9288 ering->rx_jumbo_pending = 0;
9289
Linus Torvalds1da177e2005-04-16 15:20:36 -07009290 ering->tx_pending = tp->tx_pending;
9291}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009292
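/* ethtool -G: validate the new descriptor counts, then halt,
 * reconfigure and restart the chip with the updated ring sizes.
 */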
Linus Torvalds1da177e2005-04-16 15:20:36 -07009293static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9294{
9295 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009296 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009297
Linus Torvalds1da177e2005-04-16 15:20:36 -07009298 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9299 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009300 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9301 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009302 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009303 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009304 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009305
Michael Chanbbe832c2005-06-24 20:20:04 -07009306 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009307 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009308 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009309 irq_sync = 1;
9310 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009311
Michael Chanbbe832c2005-06-24 20:20:04 -07009312 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009313
Linus Torvalds1da177e2005-04-16 15:20:36 -07009314 tp->rx_pending = ering->rx_pending;
9315
9316 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9317 tp->rx_pending > 63)
9318 tp->rx_pending = 63;
9319 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9320 tp->tx_pending = ering->tx_pending;
9321
9322 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009323 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009324 err = tg3_restart_hw(tp, 1);
9325 if (!err)
9326 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009327 }
9328
David S. Millerf47c11e2005-06-24 20:18:35 -07009329 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009330
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009331 if (irq_sync && !err)
9332 tg3_phy_start(tp);
9333
Michael Chanb9ec6c12006-07-25 16:37:27 -07009334 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009335}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009336
Linus Torvalds1da177e2005-04-16 15:20:36 -07009337static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9338{
9339 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009340
Linus Torvalds1da177e2005-04-16 15:20:36 -07009341 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009342
9343 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9344 epause->rx_pause = 1;
9345 else
9346 epause->rx_pause = 0;
9347
9348 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9349 epause->tx_pause = 1;
9350 else
9351 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009352}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009353
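/* ethtool -A: with phylib the pause request is folded into the PHY
 * advertisement; otherwise the flow-control flags are set directly and
 * the chip is restarted if the interface is up.
 */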
Linus Torvalds1da177e2005-04-16 15:20:36 -07009354static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9355{
9356 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009357 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009358
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009359 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9360 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9361 return -EAGAIN;
9362
9363 if (epause->autoneg) {
9364 u32 newadv;
9365 struct phy_device *phydev;
9366
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009367 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009368
9369 if (epause->rx_pause) {
9370 if (epause->tx_pause)
9371 newadv = ADVERTISED_Pause;
9372 else
9373 newadv = ADVERTISED_Pause |
9374 ADVERTISED_Asym_Pause;
9375 } else if (epause->tx_pause) {
9376 newadv = ADVERTISED_Asym_Pause;
9377 } else
9378 newadv = 0;
9379
9380 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9381 u32 oldadv = phydev->advertising &
9382 (ADVERTISED_Pause |
9383 ADVERTISED_Asym_Pause);
9384 if (oldadv != newadv) {
9385 phydev->advertising &=
9386 ~(ADVERTISED_Pause |
9387 ADVERTISED_Asym_Pause);
9388 phydev->advertising |= newadv;
9389 err = phy_start_aneg(phydev);
9390 }
9391 } else {
9392 tp->link_config.advertising &=
9393 ~(ADVERTISED_Pause |
9394 ADVERTISED_Asym_Pause);
9395 tp->link_config.advertising |= newadv;
9396 }
9397 } else {
9398 if (epause->rx_pause)
9399 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9400 else
9401 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9402
9403 if (epause->tx_pause)
9404 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9405 else
9406 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9407
9408 if (netif_running(dev))
9409 tg3_setup_flow_control(tp, 0, 0);
9410 }
9411 } else {
9412 int irq_sync = 0;
9413
9414 if (netif_running(dev)) {
9415 tg3_netif_stop(tp);
9416 irq_sync = 1;
9417 }
9418
9419 tg3_full_lock(tp, irq_sync);
9420
9421 if (epause->autoneg)
9422 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9423 else
9424 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9425 if (epause->rx_pause)
9426 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9427 else
9428 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9429 if (epause->tx_pause)
9430 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9431 else
9432 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9433
9434 if (netif_running(dev)) {
9435 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9436 err = tg3_restart_hw(tp, 1);
9437 if (!err)
9438 tg3_netif_start(tp);
9439 }
9440
9441 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009443
Michael Chanb9ec6c12006-07-25 16:37:27 -07009444 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009445}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009446
Linus Torvalds1da177e2005-04-16 15:20:36 -07009447static u32 tg3_get_rx_csum(struct net_device *dev)
9448{
9449 struct tg3 *tp = netdev_priv(dev);
9450 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9451}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009452
Linus Torvalds1da177e2005-04-16 15:20:36 -07009453static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9454{
9455 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009456
Linus Torvalds1da177e2005-04-16 15:20:36 -07009457 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9458 if (data != 0)
9459 return -EINVAL;
9460 return 0;
9461 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009462
David S. Millerf47c11e2005-06-24 20:18:35 -07009463 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009464 if (data)
9465 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9466 else
9467 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009468 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009469
Linus Torvalds1da177e2005-04-16 15:20:36 -07009470 return 0;
9471}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009472
Linus Torvalds1da177e2005-04-16 15:20:36 -07009473static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9474{
9475 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009476
Linus Torvalds1da177e2005-04-16 15:20:36 -07009477 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9478 if (data != 0)
9479 return -EINVAL;
9480 return 0;
9481 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009482
Michael Chanaf36e6b2006-03-23 01:28:06 -08009483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009486 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009488 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009489 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009490 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009491
9492 return 0;
9493}
9494
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009495static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009496{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009497 switch (sset) {
9498 case ETH_SS_TEST:
9499 return TG3_NUM_TEST;
9500 case ETH_SS_STATS:
9501 return TG3_NUM_STATS;
9502 default:
9503 return -EOPNOTSUPP;
9504 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009505}
9506
Linus Torvalds1da177e2005-04-16 15:20:36 -07009507static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9508{
9509 switch (stringset) {
9510 case ETH_SS_STATS:
9511 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9512 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009513 case ETH_SS_TEST:
9514 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9515 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009516 default:
9517 WARN_ON(1); /* we need a WARN() */
9518 break;
9519 }
9520}
9521
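/* ethtool -p: blink the LEDs in half-second steps for roughly the
 * requested number of seconds (or until interrupted), then restore the
 * original LED control value.
 */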
Michael Chan4009a932005-09-05 17:52:54 -07009522static int tg3_phys_id(struct net_device *dev, u32 data)
9523{
9524 struct tg3 *tp = netdev_priv(dev);
9525 int i;
9526
9527 if (!netif_running(tp->dev))
9528 return -EAGAIN;
9529
9530 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009531 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009532
9533 for (i = 0; i < (data * 2); i++) {
9534 if ((i % 2) == 0)
9535 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9536 LED_CTRL_1000MBPS_ON |
9537 LED_CTRL_100MBPS_ON |
9538 LED_CTRL_10MBPS_ON |
9539 LED_CTRL_TRAFFIC_OVERRIDE |
9540 LED_CTRL_TRAFFIC_BLINK |
9541 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009542
Michael Chan4009a932005-09-05 17:52:54 -07009543 else
9544 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9545 LED_CTRL_TRAFFIC_OVERRIDE);
9546
9547 if (msleep_interruptible(500))
9548 break;
9549 }
9550 tw32(MAC_LED_CTRL, tp->led_ctrl);
9551 return 0;
9552}
9553
Linus Torvalds1da177e2005-04-16 15:20:36 -07009554static void tg3_get_ethtool_stats (struct net_device *dev,
9555 struct ethtool_stats *estats, u64 *tmp_stats)
9556{
9557 struct tg3 *tp = netdev_priv(dev);
9558 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9559}
9560
Michael Chan566f86a2005-05-29 14:56:58 -07009561#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009562#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9563#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9564#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009565#define NVRAM_SELFBOOT_HW_SIZE 0x20
9566#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009567
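/* Offline self-test: read the NVRAM image and verify its checksums.
 * Legacy images carry two CRC32s; self-boot images use a simple byte
 * sum or per-byte parity depending on the image format.
 */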
9568static int tg3_test_nvram(struct tg3 *tp)
9569{
Al Virob9fc7dc2007-12-17 22:59:57 -08009570 u32 csum, magic;
9571 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009572 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009573
Michael Chan18201802006-03-20 22:29:15 -08009574 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009575 return -EIO;
9576
Michael Chan1b277772006-03-20 22:27:48 -08009577 if (magic == TG3_EEPROM_MAGIC)
9578 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009579 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009580 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9581 TG3_EEPROM_SB_FORMAT_1) {
9582 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9583 case TG3_EEPROM_SB_REVISION_0:
9584 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9585 break;
9586 case TG3_EEPROM_SB_REVISION_2:
9587 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9588 break;
9589 case TG3_EEPROM_SB_REVISION_3:
9590 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9591 break;
9592 default:
9593 return 0;
9594 }
9595 } else
Michael Chan1b277772006-03-20 22:27:48 -08009596 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009597 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9598 size = NVRAM_SELFBOOT_HW_SIZE;
9599 else
Michael Chan1b277772006-03-20 22:27:48 -08009600 return -EIO;
9601
9602 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009603 if (buf == NULL)
9604 return -ENOMEM;
9605
Michael Chan1b277772006-03-20 22:27:48 -08009606 err = -EIO;
9607 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009608 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009609 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009610 }
Michael Chan1b277772006-03-20 22:27:48 -08009611 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009612 goto out;
9613
Michael Chan1b277772006-03-20 22:27:48 -08009614 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009615 magic = swab32(le32_to_cpu(buf[0]));
9616 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009617 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009618 u8 *buf8 = (u8 *) buf, csum8 = 0;
9619
Al Virob9fc7dc2007-12-17 22:59:57 -08009620 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009621 TG3_EEPROM_SB_REVISION_2) {
9622 /* For rev 2, the csum doesn't include the MBA. */
9623 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9624 csum8 += buf8[i];
9625 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9626 csum8 += buf8[i];
9627 } else {
9628 for (i = 0; i < size; i++)
9629 csum8 += buf8[i];
9630 }
Michael Chan1b277772006-03-20 22:27:48 -08009631
Adrian Bunkad96b482006-04-05 22:21:04 -07009632 if (csum8 == 0) {
9633 err = 0;
9634 goto out;
9635 }
9636
9637 err = -EIO;
9638 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009639 }
Michael Chan566f86a2005-05-29 14:56:58 -07009640
Al Virob9fc7dc2007-12-17 22:59:57 -08009641 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009642 TG3_EEPROM_MAGIC_HW) {
9643 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9644 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9645 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009646
9647 /* Separate the parity bits and the data bytes. */
9648 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9649 if ((i == 0) || (i == 8)) {
9650 int l;
9651 u8 msk;
9652
9653 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9654 parity[k++] = buf8[i] & msk;
9655 i++;
9656 }
9657 else if (i == 16) {
9658 int l;
9659 u8 msk;
9660
9661 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9662 parity[k++] = buf8[i] & msk;
9663 i++;
9664
9665 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9666 parity[k++] = buf8[i] & msk;
9667 i++;
9668 }
9669 data[j++] = buf8[i];
9670 }
9671
9672 err = -EIO;
9673 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9674 u8 hw8 = hweight8(data[i]);
9675
9676 if ((hw8 & 0x1) && parity[i])
9677 goto out;
9678 else if (!(hw8 & 0x1) && !parity[i])
9679 goto out;
9680 }
9681 err = 0;
9682 goto out;
9683 }
9684
Michael Chan566f86a2005-05-29 14:56:58 -07009685 /* Bootstrap checksum at offset 0x10 */
9686 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009687 if (csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009688 goto out;
9689
9690 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9691 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009692 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009693 goto out;
9694
9695 err = 0;
9696
9697out:
9698 kfree(buf);
9699 return err;
9700}
9701
Michael Chanca430072005-05-29 14:57:23 -07009702#define TG3_SERDES_TIMEOUT_SEC 2
9703#define TG3_COPPER_TIMEOUT_SEC 6
9704
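/* Offline self-test: wait up to 2 seconds (SERDES) or 6 seconds
 * (copper) for the link to come up.
 */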
9705static int tg3_test_link(struct tg3 *tp)
9706{
9707 int i, max;
9708
9709 if (!netif_running(tp->dev))
9710 return -ENODEV;
9711
Michael Chan4c987482005-09-05 17:52:38 -07009712 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009713 max = TG3_SERDES_TIMEOUT_SEC;
9714 else
9715 max = TG3_COPPER_TIMEOUT_SEC;
9716
9717 for (i = 0; i < max; i++) {
9718 if (netif_carrier_ok(tp->dev))
9719 return 0;
9720
9721 if (msleep_interruptible(1000))
9722 break;
9723 }
9724
9725 return -EIO;
9726}
9727
Michael Chana71116d2005-05-29 14:58:11 -07009728/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009729static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009730{
Michael Chanb16250e2006-09-27 16:10:14 -07009731 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009732 u32 offset, read_mask, write_mask, val, save_val, read_val;
9733 static struct {
9734 u16 offset;
9735 u16 flags;
9736#define TG3_FL_5705 0x1
9737#define TG3_FL_NOT_5705 0x2
9738#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009739#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009740 u32 read_mask;
9741 u32 write_mask;
9742 } reg_tbl[] = {
9743 /* MAC Control Registers */
9744 { MAC_MODE, TG3_FL_NOT_5705,
9745 0x00000000, 0x00ef6f8c },
9746 { MAC_MODE, TG3_FL_5705,
9747 0x00000000, 0x01ef6b8c },
9748 { MAC_STATUS, TG3_FL_NOT_5705,
9749 0x03800107, 0x00000000 },
9750 { MAC_STATUS, TG3_FL_5705,
9751 0x03800100, 0x00000000 },
9752 { MAC_ADDR_0_HIGH, 0x0000,
9753 0x00000000, 0x0000ffff },
9754 { MAC_ADDR_0_LOW, 0x0000,
9755 0x00000000, 0xffffffff },
9756 { MAC_RX_MTU_SIZE, 0x0000,
9757 0x00000000, 0x0000ffff },
9758 { MAC_TX_MODE, 0x0000,
9759 0x00000000, 0x00000070 },
9760 { MAC_TX_LENGTHS, 0x0000,
9761 0x00000000, 0x00003fff },
9762 { MAC_RX_MODE, TG3_FL_NOT_5705,
9763 0x00000000, 0x000007fc },
9764 { MAC_RX_MODE, TG3_FL_5705,
9765 0x00000000, 0x000007dc },
9766 { MAC_HASH_REG_0, 0x0000,
9767 0x00000000, 0xffffffff },
9768 { MAC_HASH_REG_1, 0x0000,
9769 0x00000000, 0xffffffff },
9770 { MAC_HASH_REG_2, 0x0000,
9771 0x00000000, 0xffffffff },
9772 { MAC_HASH_REG_3, 0x0000,
9773 0x00000000, 0xffffffff },
9774
9775 /* Receive Data and Receive BD Initiator Control Registers. */
9776 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9777 0x00000000, 0xffffffff },
9778 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9779 0x00000000, 0xffffffff },
9780 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9781 0x00000000, 0x00000003 },
9782 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9783 0x00000000, 0xffffffff },
9784 { RCVDBDI_STD_BD+0, 0x0000,
9785 0x00000000, 0xffffffff },
9786 { RCVDBDI_STD_BD+4, 0x0000,
9787 0x00000000, 0xffffffff },
9788 { RCVDBDI_STD_BD+8, 0x0000,
9789 0x00000000, 0xffff0002 },
9790 { RCVDBDI_STD_BD+0xc, 0x0000,
9791 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009792
Michael Chana71116d2005-05-29 14:58:11 -07009793 /* Receive BD Initiator Control Registers. */
9794 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9795 0x00000000, 0xffffffff },
9796 { RCVBDI_STD_THRESH, TG3_FL_5705,
9797 0x00000000, 0x000003ff },
9798 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9799 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009800
Michael Chana71116d2005-05-29 14:58:11 -07009801 /* Host Coalescing Control Registers. */
9802 { HOSTCC_MODE, TG3_FL_NOT_5705,
9803 0x00000000, 0x00000004 },
9804 { HOSTCC_MODE, TG3_FL_5705,
9805 0x00000000, 0x000000f6 },
9806 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9807 0x00000000, 0xffffffff },
9808 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9809 0x00000000, 0x000003ff },
9810 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9811 0x00000000, 0xffffffff },
9812 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9813 0x00000000, 0x000003ff },
9814 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9815 0x00000000, 0xffffffff },
9816 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9817 0x00000000, 0x000000ff },
9818 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9819 0x00000000, 0xffffffff },
9820 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9821 0x00000000, 0x000000ff },
9822 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9823 0x00000000, 0xffffffff },
9824 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9825 0x00000000, 0xffffffff },
9826 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9827 0x00000000, 0xffffffff },
9828 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9829 0x00000000, 0x000000ff },
9830 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9831 0x00000000, 0xffffffff },
9832 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9833 0x00000000, 0x000000ff },
9834 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9835 0x00000000, 0xffffffff },
9836 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9837 0x00000000, 0xffffffff },
9838 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9839 0x00000000, 0xffffffff },
9840 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9841 0x00000000, 0xffffffff },
9842 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9843 0x00000000, 0xffffffff },
9844 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9845 0xffffffff, 0x00000000 },
9846 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9847 0xffffffff, 0x00000000 },
9848
9849 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009850 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009851 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009852 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009853 0x00000000, 0x007fffff },
9854 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9855 0x00000000, 0x0000003f },
9856 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9857 0x00000000, 0x000001ff },
9858 { BUFMGR_MB_HIGH_WATER, 0x0000,
9859 0x00000000, 0x000001ff },
9860 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9861 0xffffffff, 0x00000000 },
9862 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9863 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009864
Michael Chana71116d2005-05-29 14:58:11 -07009865 /* Mailbox Registers */
9866 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9867 0x00000000, 0x000001ff },
9868 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9869 0x00000000, 0x000001ff },
9870 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9871 0x00000000, 0x000007ff },
9872 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9873 0x00000000, 0x000001ff },
9874
9875 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9876 };
9877
Michael Chanb16250e2006-09-27 16:10:14 -07009878 is_5705 = is_5750 = 0;
9879 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009880 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009881 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9882 is_5750 = 1;
9883 }
Michael Chana71116d2005-05-29 14:58:11 -07009884
9885 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9886 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9887 continue;
9888
9889 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9890 continue;
9891
9892 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9893 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9894 continue;
9895
Michael Chanb16250e2006-09-27 16:10:14 -07009896 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9897 continue;
9898
Michael Chana71116d2005-05-29 14:58:11 -07009899 offset = (u32) reg_tbl[i].offset;
9900 read_mask = reg_tbl[i].read_mask;
9901 write_mask = reg_tbl[i].write_mask;
9902
9903 /* Save the original register content */
9904 save_val = tr32(offset);
9905
9906 /* Determine the read-only value. */
9907 read_val = save_val & read_mask;
9908
9909 /* Write zero to the register, then make sure the read-only bits
9910 * are not changed and the read/write bits are all zeros.
9911 */
9912 tw32(offset, 0);
9913
9914 val = tr32(offset);
9915
9916 /* Test the read-only and read/write bits. */
9917 if (((val & read_mask) != read_val) || (val & write_mask))
9918 goto out;
9919
9920 /* Write ones to all the bits defined by RdMask and WrMask, then
9921 * make sure the read-only bits are not changed and the
9922 * read/write bits are all ones.
9923 */
9924 tw32(offset, read_mask | write_mask);
9925
9926 val = tr32(offset);
9927
9928 /* Test the read-only bits. */
9929 if ((val & read_mask) != read_val)
9930 goto out;
9931
9932 /* Test the read/write bits. */
9933 if ((val & write_mask) != write_mask)
9934 goto out;
9935
9936 tw32(offset, save_val);
9937 }
9938
9939 return 0;
9940
9941out:
Michael Chan9f88f292006-12-07 00:22:54 -08009942 if (netif_msg_hw(tp))
9943 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9944 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009945 tw32(offset, save_val);
9946 return -EIO;
9947}
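The loop above drives each table entry through two probes: write all zeros, then write ones under the combined masks, checking after each write that the read-only bits keep their saved value and the read/write bits track what was written. Below is a minimal stand-alone sketch of that check against a fake register, so the pass/fail logic can be exercised without tg3 hardware; the masks, the fake register and every name here are illustrative, not driver API.

#include <stdio.h>

/* Fake register: bits 8-15 behave as read-only, bits 0-7 as read/write. */
#define FAKE_RO_MASK	0x0000ff00u
#define FAKE_RW_MASK	0x000000ffu

static unsigned int fake_reg = 0x00004242u;

static void fake_write(unsigned int v)
{
	/* Only the read/write bits latch; read-only bits are preserved. */
	fake_reg = (fake_reg & ~FAKE_RW_MASK) | (v & FAKE_RW_MASK);
}

static int probe_reg(unsigned int read_mask, unsigned int write_mask)
{
	unsigned int save = fake_reg;
	unsigned int read_val = save & read_mask;
	unsigned int val;

	/* Pass 1: write zeros; RO bits must hold, RW bits must read 0. */
	fake_write(0);
	val = fake_reg;
	if ((val & read_mask) != read_val || (val & write_mask))
		return -1;

	/* Pass 2: write ones under both masks; RO bits must hold,
	 * RW bits must read back as all ones.
	 */
	fake_write(read_mask | write_mask);
	val = fake_reg;
	if ((val & read_mask) != read_val ||
	    (val & write_mask) != write_mask)
		return -1;

	fake_write(save);	/* restore, as the driver does */
	return 0;
}

int main(void)
{
	printf("register probe: %s\n",
	       probe_reg(FAKE_RO_MASK, FAKE_RW_MASK) ? "FAIL" : "ok");
	return 0;
}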
9948
Michael Chan7942e1d2005-05-29 14:58:36 -07009949static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9950{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009951 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009952 int i;
9953 u32 j;
9954
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009955 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009956 for (j = 0; j < len; j += 4) {
9957 u32 val;
9958
9959 tg3_write_mem(tp, offset + j, test_pattern[i]);
9960 tg3_read_mem(tp, offset + j, &val);
9961 if (val != test_pattern[i])
9962 return -EIO;
9963 }
9964 }
9965 return 0;
9966}
9967
9968static int tg3_test_memory(struct tg3 *tp)
9969{
9970 static struct mem_entry {
9971 u32 offset;
9972 u32 len;
9973 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009974 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009975 { 0x00002000, 0x1c000},
9976 { 0xffffffff, 0x00000}
9977 }, mem_tbl_5705[] = {
9978 { 0x00000100, 0x0000c},
9979 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009980 { 0x00004000, 0x00800},
9981 { 0x00006000, 0x01000},
9982 { 0x00008000, 0x02000},
9983 { 0x00010000, 0x0e000},
9984 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009985 }, mem_tbl_5755[] = {
9986 { 0x00000200, 0x00008},
9987 { 0x00004000, 0x00800},
9988 { 0x00006000, 0x00800},
9989 { 0x00008000, 0x02000},
9990 { 0x00010000, 0x0c000},
9991 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009992 }, mem_tbl_5906[] = {
9993 { 0x00000200, 0x00008},
9994 { 0x00004000, 0x00400},
9995 { 0x00006000, 0x00400},
9996 { 0x00008000, 0x01000},
9997 { 0x00010000, 0x01000},
9998 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009999 };
10000 struct mem_entry *mem_tbl;
10001 int err = 0;
10002 int i;
10003
Michael Chan79f4d132006-03-20 22:28:57 -080010004 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -080010005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070010006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070010007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -080010010 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -070010011 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10012 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -080010013 else
10014 mem_tbl = mem_tbl_5705;
10015 } else
Michael Chan7942e1d2005-05-29 14:58:36 -070010016 mem_tbl = mem_tbl_570x;
10017
10018 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10019 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10020 mem_tbl[i].len)) != 0)
10021 break;
10022 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010023
Michael Chan7942e1d2005-05-29 14:58:36 -070010024 return err;
10025}
10026
Michael Chan9f40dea2005-09-05 17:53:06 -070010027#define TG3_MAC_LOOPBACK 0
10028#define TG3_PHY_LOOPBACK 1
10029
10030static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -070010031{
Michael Chan9f40dea2005-09-05 17:53:06 -070010032 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -070010033 u32 desc_idx;
10034 struct sk_buff *skb, *rx_skb;
10035 u8 *tx_data;
10036 dma_addr_t map;
10037 int num_pkts, tx_len, rx_len, i, err;
10038 struct tg3_rx_buffer_desc *desc;
10039
Michael Chan9f40dea2005-09-05 17:53:06 -070010040 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -070010041 /* HW errata - mac loopback fails in some cases on 5780.
10042 * Normal traffic and PHY loopback are not affected by
10043 * this erratum.
10044 */
10045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10046 return 0;
10047
Michael Chan9f40dea2005-09-05 17:53:06 -070010048 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010049 MAC_MODE_PORT_INT_LPBACK;
10050 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10051 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -070010052 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10053 mac_mode |= MAC_MODE_PORT_MODE_MII;
10054 else
10055 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -070010056 tw32(MAC_MODE, mac_mode);
10057 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -070010058 u32 val;
10059
Michael Chanb16250e2006-09-27 16:10:14 -070010060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10061 u32 phytest;
10062
10063 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10064 u32 phy;
10065
10066 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10067 phytest | MII_TG3_EPHY_SHADOW_EN);
10068 if (!tg3_readphy(tp, 0x1b, &phy))
10069 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -070010070 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10071 }
Michael Chan5d64ad32006-12-07 00:19:40 -080010072 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10073 } else
10074 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -070010075
Matt Carlson9ef8ca92007-07-11 19:48:29 -070010076 tg3_phy_toggle_automdix(tp, 0);
10077
Michael Chan3f7045c2006-09-27 16:02:29 -070010078 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -070010079 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -080010080
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010081 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -080010082 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010083 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010084 mac_mode |= MAC_MODE_PORT_MODE_MII;
10085 } else
10086 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010087
Michael Chanc94e3942005-09-27 12:12:42 -070010088 /* reset to prevent losing 1st rx packet intermittently */
10089 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10090 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10091 udelay(10);
10092 tw32_f(MAC_RX_MODE, tp->rx_mode);
10093 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10095 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10096 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10097 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10098 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010099 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10100 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10101 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010102 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010103 }
10104 else
10105 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010106
10107 err = -EIO;
10108
Michael Chanc76949a2005-05-29 14:58:59 -070010109 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010110 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010111 if (!skb)
10112 return -ENOMEM;
10113
Michael Chanc76949a2005-05-29 14:58:59 -070010114 tx_data = skb_put(skb, tx_len);
10115 memcpy(tx_data, tp->dev->dev_addr, 6);
10116 memset(tx_data + 6, 0x0, 8);
10117
10118 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10119
10120 for (i = 14; i < tx_len; i++)
10121 tx_data[i] = (u8) (i & 0xff);
10122
10123 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10124
10125 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10126 HOSTCC_MODE_NOW);
10127
10128 udelay(10);
10129
10130 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10131
Michael Chanc76949a2005-05-29 14:58:59 -070010132 num_pkts = 0;
10133
Michael Chan9f40dea2005-09-05 17:53:06 -070010134 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010135
Michael Chan9f40dea2005-09-05 17:53:06 -070010136 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010137 num_pkts++;
10138
Michael Chan9f40dea2005-09-05 17:53:06 -070010139 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10140 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010141 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010142
10143 udelay(10);
10144
Michael Chan3f7045c2006-09-27 16:02:29 -070010145 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10146 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010147 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10148 HOSTCC_MODE_NOW);
10149
10150 udelay(10);
10151
10152 tx_idx = tp->hw_status->idx[0].tx_consumer;
10153 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010154 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010155 (rx_idx == (rx_start_idx + num_pkts)))
10156 break;
10157 }
10158
10159 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10160 dev_kfree_skb(skb);
10161
Michael Chan9f40dea2005-09-05 17:53:06 -070010162 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010163 goto out;
10164
10165 if (rx_idx != rx_start_idx + num_pkts)
10166 goto out;
10167
10168 desc = &tp->rx_rcb[rx_start_idx];
10169 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10170 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10171 if (opaque_key != RXD_OPAQUE_RING_STD)
10172 goto out;
10173
10174 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10175 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10176 goto out;
10177
10178 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10179 if (rx_len != tx_len)
10180 goto out;
10181
10182 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10183
10184 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10185 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10186
10187 for (i = 14; i < tx_len; i++) {
10188 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10189 goto out;
10190 }
10191 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010192
Michael Chanc76949a2005-05-29 14:58:59 -070010193 /* tg3_free_rings will unmap and free the rx_skb */
10194out:
10195 return err;
10196}
10197
Michael Chan9f40dea2005-09-05 17:53:06 -070010198#define TG3_MAC_LOOPBACK_FAILED 1
10199#define TG3_PHY_LOOPBACK_FAILED 2
10200#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10201 TG3_PHY_LOOPBACK_FAILED)
10202
10203static int tg3_test_loopback(struct tg3 *tp)
10204{
10205 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010206 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010207
10208 if (!netif_running(tp->dev))
10209 return TG3_LOOPBACK_FAILED;
10210
Michael Chanb9ec6c12006-07-25 16:37:27 -070010211 err = tg3_reset_hw(tp, 1);
10212 if (err)
10213 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010214
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010218 int i;
10219 u32 status;
10220
10221 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10222
10223 /* Wait for up to 40 microseconds to acquire lock. */
10224 for (i = 0; i < 4; i++) {
10225 status = tr32(TG3_CPMU_MUTEX_GNT);
10226 if (status == CPMU_MUTEX_GNT_DRIVER)
10227 break;
10228 udelay(10);
10229 }
10230
10231 if (status != CPMU_MUTEX_GNT_DRIVER)
10232 return TG3_LOOPBACK_FAILED;
10233
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010234 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010235 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010236 tw32(TG3_CPMU_CTRL,
10237 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10238 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010239 }
10240
Michael Chan9f40dea2005-09-05 17:53:06 -070010241 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10242 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010243
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010244 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010245 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010247 tw32(TG3_CPMU_CTRL, cpmuctrl);
10248
10249 /* Release the mutex */
10250 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10251 }
10252
Matt Carlsondd477002008-05-25 23:45:58 -070010253 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10254 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010255 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10256 err |= TG3_PHY_LOOPBACK_FAILED;
10257 }
10258
10259 return err;
10260}
10261
Michael Chan4cafd3f2005-05-29 14:56:34 -070010262static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10263 u64 *data)
10264{
Michael Chan566f86a2005-05-29 14:56:58 -070010265 struct tg3 *tp = netdev_priv(dev);
10266
Michael Chanbc1c7562006-03-20 17:48:03 -080010267 if (tp->link_config.phy_is_low_power)
10268 tg3_set_power_state(tp, PCI_D0);
10269
Michael Chan566f86a2005-05-29 14:56:58 -070010270 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10271
10272 if (tg3_test_nvram(tp) != 0) {
10273 etest->flags |= ETH_TEST_FL_FAILED;
10274 data[0] = 1;
10275 }
Michael Chanca430072005-05-29 14:57:23 -070010276 if (tg3_test_link(tp) != 0) {
10277 etest->flags |= ETH_TEST_FL_FAILED;
10278 data[1] = 1;
10279 }
Michael Chana71116d2005-05-29 14:58:11 -070010280 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010281 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010282
Michael Chanbbe832c2005-06-24 20:20:04 -070010283 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010284 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010285 tg3_netif_stop(tp);
10286 irq_sync = 1;
10287 }
10288
10289 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010290
10291 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010292 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010293 tg3_halt_cpu(tp, RX_CPU_BASE);
10294 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10295 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010296 if (!err)
10297 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010298
Michael Chand9ab5ad2006-03-20 22:27:35 -080010299 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10300 tg3_phy_reset(tp);
10301
Michael Chana71116d2005-05-29 14:58:11 -070010302 if (tg3_test_registers(tp) != 0) {
10303 etest->flags |= ETH_TEST_FL_FAILED;
10304 data[2] = 1;
10305 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010306 if (tg3_test_memory(tp) != 0) {
10307 etest->flags |= ETH_TEST_FL_FAILED;
10308 data[3] = 1;
10309 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010310 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010311 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010312
David S. Millerf47c11e2005-06-24 20:18:35 -070010313 tg3_full_unlock(tp);
10314
Michael Chand4bc3922005-05-29 14:59:20 -070010315 if (tg3_test_interrupt(tp) != 0) {
10316 etest->flags |= ETH_TEST_FL_FAILED;
10317 data[5] = 1;
10318 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010319
10320 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010321
Michael Chana71116d2005-05-29 14:58:11 -070010322 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10323 if (netif_running(dev)) {
10324 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010325 err2 = tg3_restart_hw(tp, 1);
10326 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010327 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010328 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010329
10330 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010331
10332 if (irq_sync && !err2)
10333 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010334 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010335 if (tp->link_config.phy_is_low_power)
10336 tg3_set_power_state(tp, PCI_D3hot);
10337
Michael Chan4cafd3f2005-05-29 14:56:34 -070010338}
10339
Linus Torvalds1da177e2005-04-16 15:20:36 -070010340static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10341{
10342 struct mii_ioctl_data *data = if_mii(ifr);
10343 struct tg3 *tp = netdev_priv(dev);
10344 int err;
10345
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010346 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10347 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10348 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010349 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010350 }
10351
Linus Torvalds1da177e2005-04-16 15:20:36 -070010352 switch(cmd) {
10353 case SIOCGMIIPHY:
10354 data->phy_id = PHY_ADDR;
10355
10356 /* fallthru */
10357 case SIOCGMIIREG: {
10358 u32 mii_regval;
10359
10360 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10361 break; /* We have no PHY */
10362
Michael Chanbc1c7562006-03-20 17:48:03 -080010363 if (tp->link_config.phy_is_low_power)
10364 return -EAGAIN;
10365
David S. Millerf47c11e2005-06-24 20:18:35 -070010366 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010367 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010368 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010369
10370 data->val_out = mii_regval;
10371
10372 return err;
10373 }
10374
10375 case SIOCSMIIREG:
10376 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10377 break; /* We have no PHY */
10378
10379 if (!capable(CAP_NET_ADMIN))
10380 return -EPERM;
10381
Michael Chanbc1c7562006-03-20 17:48:03 -080010382 if (tp->link_config.phy_is_low_power)
10383 return -EAGAIN;
10384
David S. Millerf47c11e2005-06-24 20:18:35 -070010385 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010386 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010387 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010388
10389 return err;
10390
10391 default:
10392 /* do nothing */
10393 break;
10394 }
10395 return -EOPNOTSUPP;
10396}
10397
10398#if TG3_VLAN_TAG_USED
10399static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10400{
10401 struct tg3 *tp = netdev_priv(dev);
10402
Michael Chan29315e82006-06-29 20:12:30 -070010403 if (netif_running(dev))
10404 tg3_netif_stop(tp);
10405
David S. Millerf47c11e2005-06-24 20:18:35 -070010406 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010407
10408 tp->vlgrp = grp;
10409
10410 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10411 __tg3_set_rx_mode(dev);
10412
Michael Chan29315e82006-06-29 20:12:30 -070010413 if (netif_running(dev))
10414 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010415
10416 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010417}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010418#endif
10419
David S. Miller15f98502005-05-18 22:49:26 -070010420static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10421{
10422 struct tg3 *tp = netdev_priv(dev);
10423
10424 memcpy(ec, &tp->coal, sizeof(*ec));
10425 return 0;
10426}
10427
Michael Chand244c892005-07-05 14:42:33 -070010428static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10429{
10430 struct tg3 *tp = netdev_priv(dev);
10431 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10432 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10433
10434 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10435 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10436 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10437 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10438 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10439 }
10440
10441 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10442 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10443 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10444 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10445 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10446 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10447 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10448 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10449 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10450 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10451 return -EINVAL;
10452
10453 /* No rx interrupts will be generated if both are zero */
10454 if ((ec->rx_coalesce_usecs == 0) &&
10455 (ec->rx_max_coalesced_frames == 0))
10456 return -EINVAL;
10457
10458 /* No tx interrupts will be generated if both are zero */
10459 if ((ec->tx_coalesce_usecs == 0) &&
10460 (ec->tx_max_coalesced_frames == 0))
10461 return -EINVAL;
10462
10463 /* Only copy relevant parameters, ignore all others. */
10464 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10465 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10466 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10467 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10468 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10469 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10470 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10471 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10472 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10473
10474 if (netif_running(dev)) {
10475 tg3_full_lock(tp, 0);
10476 __tg3_set_coalesce(tp, &tp->coal);
10477 tg3_full_unlock(tp);
10478 }
10479 return 0;
10480}
10481
Jeff Garzik7282d492006-09-13 14:30:00 -040010482static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010483 .get_settings = tg3_get_settings,
10484 .set_settings = tg3_set_settings,
10485 .get_drvinfo = tg3_get_drvinfo,
10486 .get_regs_len = tg3_get_regs_len,
10487 .get_regs = tg3_get_regs,
10488 .get_wol = tg3_get_wol,
10489 .set_wol = tg3_set_wol,
10490 .get_msglevel = tg3_get_msglevel,
10491 .set_msglevel = tg3_set_msglevel,
10492 .nway_reset = tg3_nway_reset,
10493 .get_link = ethtool_op_get_link,
10494 .get_eeprom_len = tg3_get_eeprom_len,
10495 .get_eeprom = tg3_get_eeprom,
10496 .set_eeprom = tg3_set_eeprom,
10497 .get_ringparam = tg3_get_ringparam,
10498 .set_ringparam = tg3_set_ringparam,
10499 .get_pauseparam = tg3_get_pauseparam,
10500 .set_pauseparam = tg3_set_pauseparam,
10501 .get_rx_csum = tg3_get_rx_csum,
10502 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010503 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010504 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010505 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010506 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010507 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010508 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010509 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010510 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010511 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010512 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010513};
10514
10515static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10516{
Michael Chan1b277772006-03-20 22:27:48 -080010517 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010518
10519 tp->nvram_size = EEPROM_CHIP_SIZE;
10520
Michael Chan18201802006-03-20 22:29:15 -080010521 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010522 return;
10523
Michael Chanb16250e2006-09-27 16:10:14 -070010524 if ((magic != TG3_EEPROM_MAGIC) &&
10525 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10526 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010527 return;
10528
10529 /*
10530 * Size the chip by reading offsets at increasing powers of two.
10531 * When we encounter our validation signature, we know the addressing
10532 * has wrapped around, and thus have our chip size.
10533 */
Michael Chan1b277772006-03-20 22:27:48 -080010534 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010535
10536 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010537 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010538 return;
10539
Michael Chan18201802006-03-20 22:29:15 -080010540 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010541 break;
10542
10543 cursize <<= 1;
10544 }
10545
10546 tp->nvram_size = cursize;
10547}
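The sizing loop above depends on address wrap-around: once the probe offset walks past the end of the part, the device's address lines alias back to offset 0, the validation signature reads back again, and that offset is the chip size. Below is a small stand-alone simulation of the idea, assuming a 128 KiB part; the fake read routine and the magic constant merely stand in for the real NVRAM accessors.

#include <stdio.h>

#define FAKE_EEPROM_SIZE	0x20000u	/* assume a 128 KiB part */
#define FAKE_MAGIC		0x669955aau	/* stand-in validation signature */

static unsigned int fake_read(unsigned int offset)
{
	offset &= FAKE_EEPROM_SIZE - 1;		/* address lines wrap */
	return offset == 0 ? FAKE_MAGIC : 0;	/* signature lives at offset 0 */
}

int main(void)
{
	unsigned int cursize = 0x10;

	/* Double the probe offset until the signature reads back again. */
	while (cursize < 0x100000 && fake_read(cursize) != FAKE_MAGIC)
		cursize <<= 1;

	printf("detected size: %#x\n", cursize);	/* prints 0x20000 */
	return 0;
}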
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010548
Linus Torvalds1da177e2005-04-16 15:20:36 -070010549static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10550{
10551 u32 val;
10552
Michael Chan18201802006-03-20 22:29:15 -080010553 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010554 return;
10555
10556 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010557 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010558 tg3_get_eeprom_size(tp);
10559 return;
10560 }
10561
Linus Torvalds1da177e2005-04-16 15:20:36 -070010562 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10563 if (val != 0) {
10564 tp->nvram_size = (val >> 16) * 1024;
10565 return;
10566 }
10567 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010568 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010569}
10570
10571static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10572{
10573 u32 nvcfg1;
10574
10575 nvcfg1 = tr32(NVRAM_CFG1);
10576 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10577 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10578 }
10579 else {
10580 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10581 tw32(NVRAM_CFG1, nvcfg1);
10582 }
10583
Michael Chan4c987482005-09-05 17:52:38 -070010584 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010585 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010586 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10587 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10588 tp->nvram_jedecnum = JEDEC_ATMEL;
10589 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10590 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10591 break;
10592 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10593 tp->nvram_jedecnum = JEDEC_ATMEL;
10594 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10595 break;
10596 case FLASH_VENDOR_ATMEL_EEPROM:
10597 tp->nvram_jedecnum = JEDEC_ATMEL;
10598 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10599 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10600 break;
10601 case FLASH_VENDOR_ST:
10602 tp->nvram_jedecnum = JEDEC_ST;
10603 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10604 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10605 break;
10606 case FLASH_VENDOR_SAIFUN:
10607 tp->nvram_jedecnum = JEDEC_SAIFUN;
10608 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10609 break;
10610 case FLASH_VENDOR_SST_SMALL:
10611 case FLASH_VENDOR_SST_LARGE:
10612 tp->nvram_jedecnum = JEDEC_SST;
10613 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10614 break;
10615 }
10616 }
10617 else {
10618 tp->nvram_jedecnum = JEDEC_ATMEL;
10619 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10620 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10621 }
10622}
10623
Michael Chan361b4ac2005-04-21 17:11:21 -070010624static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10625{
10626 u32 nvcfg1;
10627
10628 nvcfg1 = tr32(NVRAM_CFG1);
10629
Michael Chane6af3012005-04-21 17:12:05 -070010630 /* NVRAM protection for TPM */
10631 if (nvcfg1 & (1 << 27))
10632 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10633
Michael Chan361b4ac2005-04-21 17:11:21 -070010634 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10635 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10636 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10637 tp->nvram_jedecnum = JEDEC_ATMEL;
10638 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10639 break;
10640 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10641 tp->nvram_jedecnum = JEDEC_ATMEL;
10642 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10643 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10644 break;
10645 case FLASH_5752VENDOR_ST_M45PE10:
10646 case FLASH_5752VENDOR_ST_M45PE20:
10647 case FLASH_5752VENDOR_ST_M45PE40:
10648 tp->nvram_jedecnum = JEDEC_ST;
10649 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10650 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10651 break;
10652 }
10653
10654 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10655 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10656 case FLASH_5752PAGE_SIZE_256:
10657 tp->nvram_pagesize = 256;
10658 break;
10659 case FLASH_5752PAGE_SIZE_512:
10660 tp->nvram_pagesize = 512;
10661 break;
10662 case FLASH_5752PAGE_SIZE_1K:
10663 tp->nvram_pagesize = 1024;
10664 break;
10665 case FLASH_5752PAGE_SIZE_2K:
10666 tp->nvram_pagesize = 2048;
10667 break;
10668 case FLASH_5752PAGE_SIZE_4K:
10669 tp->nvram_pagesize = 4096;
10670 break;
10671 case FLASH_5752PAGE_SIZE_264:
10672 tp->nvram_pagesize = 264;
10673 break;
10674 }
10675 }
10676 else {
10677 /* For eeprom, set pagesize to maximum eeprom size */
10678 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10679
10680 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10681 tw32(NVRAM_CFG1, nvcfg1);
10682 }
10683}
10684
Michael Chand3c7b882006-03-23 01:28:25 -080010685static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10686{
Matt Carlson989a9d22007-05-05 11:51:05 -070010687 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010688
10689 nvcfg1 = tr32(NVRAM_CFG1);
10690
10691 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010692 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010693 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010694 protect = 1;
10695 }
Michael Chand3c7b882006-03-23 01:28:25 -080010696
Matt Carlson989a9d22007-05-05 11:51:05 -070010697 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10698 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010699 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10700 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10701 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010702 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010703 tp->nvram_jedecnum = JEDEC_ATMEL;
10704 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10705 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10706 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010707 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10708 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010709 tp->nvram_size = (protect ? 0x3e200 :
10710 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010711 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010712 tp->nvram_size = (protect ? 0x1f200 :
10713 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010714 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010715 tp->nvram_size = (protect ? 0x1f200 :
10716 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010717 break;
10718 case FLASH_5752VENDOR_ST_M45PE10:
10719 case FLASH_5752VENDOR_ST_M45PE20:
10720 case FLASH_5752VENDOR_ST_M45PE40:
10721 tp->nvram_jedecnum = JEDEC_ST;
10722 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10723 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10724 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010725 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010726 tp->nvram_size = (protect ?
10727 TG3_NVRAM_SIZE_64KB :
10728 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010729 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010730 tp->nvram_size = (protect ?
10731 TG3_NVRAM_SIZE_64KB :
10732 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010733 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010734 tp->nvram_size = (protect ?
10735 TG3_NVRAM_SIZE_128KB :
10736 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010737 break;
10738 }
10739}
10740
Michael Chan1b277772006-03-20 22:27:48 -080010741static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10742{
10743 u32 nvcfg1;
10744
10745 nvcfg1 = tr32(NVRAM_CFG1);
10746
10747 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10748 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10749 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10750 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10751 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10752 tp->nvram_jedecnum = JEDEC_ATMEL;
10753 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10754 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10755
10756 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10757 tw32(NVRAM_CFG1, nvcfg1);
10758 break;
10759 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10760 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10761 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10762 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10763 tp->nvram_jedecnum = JEDEC_ATMEL;
10764 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10765 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10766 tp->nvram_pagesize = 264;
10767 break;
10768 case FLASH_5752VENDOR_ST_M45PE10:
10769 case FLASH_5752VENDOR_ST_M45PE20:
10770 case FLASH_5752VENDOR_ST_M45PE40:
10771 tp->nvram_jedecnum = JEDEC_ST;
10772 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10773 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10774 tp->nvram_pagesize = 256;
10775 break;
10776 }
10777}
10778
Matt Carlson6b91fa02007-10-10 18:01:09 -070010779static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10780{
10781 u32 nvcfg1, protect = 0;
10782
10783 nvcfg1 = tr32(NVRAM_CFG1);
10784
10785 /* NVRAM protection for TPM */
10786 if (nvcfg1 & (1 << 27)) {
10787 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10788 protect = 1;
10789 }
10790
10791 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10792 switch (nvcfg1) {
10793 case FLASH_5761VENDOR_ATMEL_ADB021D:
10794 case FLASH_5761VENDOR_ATMEL_ADB041D:
10795 case FLASH_5761VENDOR_ATMEL_ADB081D:
10796 case FLASH_5761VENDOR_ATMEL_ADB161D:
10797 case FLASH_5761VENDOR_ATMEL_MDB021D:
10798 case FLASH_5761VENDOR_ATMEL_MDB041D:
10799 case FLASH_5761VENDOR_ATMEL_MDB081D:
10800 case FLASH_5761VENDOR_ATMEL_MDB161D:
10801 tp->nvram_jedecnum = JEDEC_ATMEL;
10802 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10803 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10804 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10805 tp->nvram_pagesize = 256;
10806 break;
10807 case FLASH_5761VENDOR_ST_A_M45PE20:
10808 case FLASH_5761VENDOR_ST_A_M45PE40:
10809 case FLASH_5761VENDOR_ST_A_M45PE80:
10810 case FLASH_5761VENDOR_ST_A_M45PE16:
10811 case FLASH_5761VENDOR_ST_M_M45PE20:
10812 case FLASH_5761VENDOR_ST_M_M45PE40:
10813 case FLASH_5761VENDOR_ST_M_M45PE80:
10814 case FLASH_5761VENDOR_ST_M_M45PE16:
10815 tp->nvram_jedecnum = JEDEC_ST;
10816 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10817 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10818 tp->nvram_pagesize = 256;
10819 break;
10820 }
10821
10822 if (protect) {
10823 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10824 } else {
10825 switch (nvcfg1) {
10826 case FLASH_5761VENDOR_ATMEL_ADB161D:
10827 case FLASH_5761VENDOR_ATMEL_MDB161D:
10828 case FLASH_5761VENDOR_ST_A_M45PE16:
10829 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010830 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010831 break;
10832 case FLASH_5761VENDOR_ATMEL_ADB081D:
10833 case FLASH_5761VENDOR_ATMEL_MDB081D:
10834 case FLASH_5761VENDOR_ST_A_M45PE80:
10835 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010836 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010837 break;
10838 case FLASH_5761VENDOR_ATMEL_ADB041D:
10839 case FLASH_5761VENDOR_ATMEL_MDB041D:
10840 case FLASH_5761VENDOR_ST_A_M45PE40:
10841 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010842 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010843 break;
10844 case FLASH_5761VENDOR_ATMEL_ADB021D:
10845 case FLASH_5761VENDOR_ATMEL_MDB021D:
10846 case FLASH_5761VENDOR_ST_A_M45PE20:
10847 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010848 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010849 break;
10850 }
10851 }
10852}
10853
Michael Chanb5d37722006-09-27 16:06:21 -070010854static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10855{
10856 tp->nvram_jedecnum = JEDEC_ATMEL;
10857 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10858 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10859}
10860
Linus Torvalds1da177e2005-04-16 15:20:36 -070010861/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10862static void __devinit tg3_nvram_init(struct tg3 *tp)
10863{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010864 tw32_f(GRC_EEPROM_ADDR,
10865 (EEPROM_ADDR_FSM_RESET |
10866 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10867 EEPROM_ADDR_CLKPERD_SHIFT)));
10868
Michael Chan9d57f012006-12-07 00:23:25 -080010869 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010870
10871 /* Enable seeprom accesses. */
10872 tw32_f(GRC_LOCAL_CTRL,
10873 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10874 udelay(100);
10875
10876 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10877 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10878 tp->tg3_flags |= TG3_FLAG_NVRAM;
10879
Michael Chanec41c7d2006-01-17 02:40:55 -080010880 if (tg3_nvram_lock(tp)) {
10881 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10882 "tg3_nvram_init failed.\n", tp->dev->name);
10883 return;
10884 }
Michael Chane6af3012005-04-21 17:12:05 -070010885 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010886
Matt Carlson989a9d22007-05-05 11:51:05 -070010887 tp->nvram_size = 0;
10888
Michael Chan361b4ac2005-04-21 17:11:21 -070010889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10890 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010891 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10892 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010893 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010894 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010896 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010897 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10898 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010899 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10900 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010901 else
10902 tg3_get_nvram_info(tp);
10903
Matt Carlson989a9d22007-05-05 11:51:05 -070010904 if (tp->nvram_size == 0)
10905 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010906
Michael Chane6af3012005-04-21 17:12:05 -070010907 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010908 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010909
10910 } else {
10911 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10912
10913 tg3_get_eeprom_size(tp);
10914 }
10915}
10916
10917static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10918 u32 offset, u32 *val)
10919{
10920 u32 tmp;
10921 int i;
10922
10923 if (offset > EEPROM_ADDR_ADDR_MASK ||
10924 (offset % 4) != 0)
10925 return -EINVAL;
10926
10927 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10928 EEPROM_ADDR_DEVID_MASK |
10929 EEPROM_ADDR_READ);
10930 tw32(GRC_EEPROM_ADDR,
10931 tmp |
10932 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10933 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10934 EEPROM_ADDR_ADDR_MASK) |
10935 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10936
Michael Chan9d57f012006-12-07 00:23:25 -080010937 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010938 tmp = tr32(GRC_EEPROM_ADDR);
10939
10940 if (tmp & EEPROM_ADDR_COMPLETE)
10941 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010942 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010943 }
10944 if (!(tmp & EEPROM_ADDR_COMPLETE))
10945 return -EBUSY;
10946
10947 *val = tr32(GRC_EEPROM_DATA);
10948 return 0;
10949}
10950
10951#define NVRAM_CMD_TIMEOUT 10000
10952
10953static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10954{
10955 int i;
10956
10957 tw32(NVRAM_CMD, nvram_cmd);
10958 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10959 udelay(10);
10960 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10961 udelay(10);
10962 break;
10963 }
10964 }
10965 if (i == NVRAM_CMD_TIMEOUT) {
10966 return -EBUSY;
10967 }
10968 return 0;
10969}
10970
Michael Chan18201802006-03-20 22:29:15 -080010971static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10972{
10973 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10974 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10975 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010976 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010977 (tp->nvram_jedecnum == JEDEC_ATMEL))
10978
10979 addr = ((addr / tp->nvram_pagesize) <<
10980 ATMEL_AT45DB0X1B_PAGE_POS) +
10981 (addr % tp->nvram_pagesize);
10982
10983 return addr;
10984}
10985
Michael Chanc4e65752006-03-20 22:29:32 -080010986static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10987{
10988 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10989 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10990 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010991 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010992 (tp->nvram_jedecnum == JEDEC_ATMEL))
10993
10994 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10995 tp->nvram_pagesize) +
10996 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10997
10998 return addr;
10999}
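The two helpers above convert between a flat byte offset and the page-addressed layout used by buffered Atmel parts, where each page sits at a fixed bit position instead of packing contiguously. Below is a stand-alone sketch of the same arithmetic with the constants spelled out locally; the 264-byte page size and the shift of 9 are assumptions based on the AT45DB0x1B family, whereas the driver takes them from tg3.h and the NVRAM config.

#include <stdio.h>

#define EXAMPLE_PAGE_POS	9	/* assumed page-position shift */
#define EXAMPLE_PAGESIZE	264u	/* assumed bytes per page */

/* Flat byte offset -> device (page, offset-in-page) encoding. */
static unsigned int example_phys_addr(unsigned int addr)
{
	return ((addr / EXAMPLE_PAGESIZE) << EXAMPLE_PAGE_POS) +
	       (addr % EXAMPLE_PAGESIZE);
}

/* Device encoding -> flat byte offset. */
static unsigned int example_logical_addr(unsigned int addr)
{
	return ((addr >> EXAMPLE_PAGE_POS) * EXAMPLE_PAGESIZE) +
	       (addr & ((1u << EXAMPLE_PAGE_POS) - 1));
}

int main(void)
{
	/* Byte 264 is the first byte of page 1, which the device addresses
	 * as 1 << 9 = 0x200; the reverse translation recovers 264.
	 */
	printf("%#x\n", example_phys_addr(264));	/* 0x200 */
	printf("%u\n", example_logical_addr(0x200));	/* 264   */
	return 0;
}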
11000
Linus Torvalds1da177e2005-04-16 15:20:36 -070011001static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11002{
11003 int ret;
11004
Linus Torvalds1da177e2005-04-16 15:20:36 -070011005 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11006 return tg3_nvram_read_using_eeprom(tp, offset, val);
11007
Michael Chan18201802006-03-20 22:29:15 -080011008 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011009
11010 if (offset > NVRAM_ADDR_MSK)
11011 return -EINVAL;
11012
Michael Chanec41c7d2006-01-17 02:40:55 -080011013 ret = tg3_nvram_lock(tp);
11014 if (ret)
11015 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011016
Michael Chane6af3012005-04-21 17:12:05 -070011017 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011018
11019 tw32(NVRAM_ADDR, offset);
11020 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11021 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11022
11023 if (ret == 0)
11024 *val = swab32(tr32(NVRAM_RDDATA));
11025
Michael Chane6af3012005-04-21 17:12:05 -070011026 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011027
Michael Chan381291b2005-12-13 21:08:21 -080011028 tg3_nvram_unlock(tp);
11029
Linus Torvalds1da177e2005-04-16 15:20:36 -070011030 return ret;
11031}
11032
Al Virob9fc7dc2007-12-17 22:59:57 -080011033static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11034{
11035 u32 v;
11036 int res = tg3_nvram_read(tp, offset, &v);
11037 if (!res)
11038 *val = cpu_to_le32(v);
11039 return res;
11040}
11041
Michael Chan18201802006-03-20 22:29:15 -080011042static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11043{
11044 int err;
11045 u32 tmp;
11046
11047 err = tg3_nvram_read(tp, offset, &tmp);
11048 *val = swab32(tmp);
11049 return err;
11050}
11051
Linus Torvalds1da177e2005-04-16 15:20:36 -070011052static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11053 u32 offset, u32 len, u8 *buf)
11054{
11055 int i, j, rc = 0;
11056 u32 val;
11057
11058 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011059 u32 addr;
11060 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011061
11062 addr = offset + i;
11063
11064 memcpy(&data, buf + i, 4);
11065
Al Virob9fc7dc2007-12-17 22:59:57 -080011066 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011067
11068 val = tr32(GRC_EEPROM_ADDR);
11069 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11070
11071 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11072 EEPROM_ADDR_READ);
11073 tw32(GRC_EEPROM_ADDR, val |
11074 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11075 (addr & EEPROM_ADDR_ADDR_MASK) |
11076 EEPROM_ADDR_START |
11077 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011078
Michael Chan9d57f012006-12-07 00:23:25 -080011079 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011080 val = tr32(GRC_EEPROM_ADDR);
11081
11082 if (val & EEPROM_ADDR_COMPLETE)
11083 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011084 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011085 }
11086 if (!(val & EEPROM_ADDR_COMPLETE)) {
11087 rc = -EBUSY;
11088 break;
11089 }
11090 }
11091
11092 return rc;
11093}
11094
11095/* offset and length are dword aligned */
11096static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11097 u8 *buf)
11098{
11099 int ret = 0;
11100 u32 pagesize = tp->nvram_pagesize;
11101 u32 pagemask = pagesize - 1;
11102 u32 nvram_cmd;
11103 u8 *tmp;
11104
11105 tmp = kmalloc(pagesize, GFP_KERNEL);
11106 if (tmp == NULL)
11107 return -ENOMEM;
11108
11109 while (len) {
11110 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011111 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011112
11113 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011114
Linus Torvalds1da177e2005-04-16 15:20:36 -070011115 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011116 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011117 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011118 break;
11119 }
11120 if (ret)
11121 break;
11122
11123 page_off = offset & pagemask;
11124 size = pagesize;
11125 if (len < size)
11126 size = len;
11127
11128 len -= size;
11129
11130 memcpy(tmp + page_off, buf, size);
11131
11132 offset = offset + (pagesize - page_off);
11133
Michael Chane6af3012005-04-21 17:12:05 -070011134 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011135
11136 /*
11137 * Before we can erase the flash page, we need
11138 * to issue a special "write enable" command.
11139 */
11140 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11141
11142 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11143 break;
11144
11145 /* Erase the target page */
11146 tw32(NVRAM_ADDR, phy_addr);
11147
11148 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11149 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11150
11151 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11152 break;
11153
11154 /* Issue another write enable to start the write. */
11155 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11156
11157 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11158 break;
11159
11160 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011161 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011162
Al Virob9fc7dc2007-12-17 22:59:57 -080011163 data = *((__be32 *) (tmp + j));
11164 /* swab32(le32_to_cpu(data)), actually */
11165 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011166
11167 tw32(NVRAM_ADDR, phy_addr + j);
11168
11169 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11170 NVRAM_CMD_WR;
11171
11172 if (j == 0)
11173 nvram_cmd |= NVRAM_CMD_FIRST;
11174 else if (j == (pagesize - 4))
11175 nvram_cmd |= NVRAM_CMD_LAST;
11176
11177 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11178 break;
11179 }
11180 if (ret)
11181 break;
11182 }
11183
11184 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11185 tg3_nvram_exec_cmd(tp, nvram_cmd);
11186
11187 kfree(tmp);
11188
11189 return ret;
11190}
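Unbuffered flash can only be rewritten a whole page at a time, so the routine above reads each affected page into a bounce buffer, splices the caller's bytes into it, then erases and rewrites the page. Below is a simplified, stand-alone sketch of just the per-page splitting, not the driver's exact bookkeeping or command sequence; the page size and the example span are illustrative.

#include <stdio.h>

#define EXAMPLE_PAGESIZE 256u	/* assume a power-of-two page size, as the
				 * unbuffered parts handled above use */

int main(void)
{
	unsigned int offset = 500, len = 600;	/* arbitrary dword-aligned span */

	while (len) {
		unsigned int page_base = offset & ~(EXAMPLE_PAGESIZE - 1);
		unsigned int page_off  = offset & (EXAMPLE_PAGESIZE - 1);
		unsigned int chunk     = EXAMPLE_PAGESIZE - page_off;

		if (chunk > len)
			chunk = len;

		printf("page %#5x: splice %3u bytes at +%3u, erase, rewrite\n",
		       page_base, chunk, page_off);

		offset += chunk;
		len -= chunk;
	}
	return 0;
}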
11191
11192/* offset and length are dword aligned */
11193static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11194 u8 *buf)
11195{
11196 int i, ret = 0;
11197
11198 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011199 u32 page_off, phy_addr, nvram_cmd;
11200 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011201
11202 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011203 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011204
11205 page_off = offset % tp->nvram_pagesize;
11206
Michael Chan18201802006-03-20 22:29:15 -080011207 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011208
11209 tw32(NVRAM_ADDR, phy_addr);
11210
11211 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11212
11213 if ((page_off == 0) || (i == 0))
11214 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011215 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011216 nvram_cmd |= NVRAM_CMD_LAST;
11217
11218 if (i == (len - 4))
11219 nvram_cmd |= NVRAM_CMD_LAST;
11220
Michael Chan4c987482005-09-05 17:52:38 -070011221 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011222 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011223 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011224 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011225 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011226 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011227 (tp->nvram_jedecnum == JEDEC_ST) &&
11228 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011229
11230 if ((ret = tg3_nvram_exec_cmd(tp,
11231 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11232 NVRAM_CMD_DONE)))
11233
11234 break;
11235 }
11236 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11237 /* We always do complete word writes to eeprom. */
11238 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11239 }
11240
11241 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11242 break;
11243 }
11244 return ret;
11245}
11246
11247/* offset and length are dword aligned */
11248static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11249{
11250 int ret;
11251
Linus Torvalds1da177e2005-04-16 15:20:36 -070011252 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011253 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11254 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011255 udelay(40);
11256 }
11257
11258 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11259 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11260 }
11261 else {
11262 u32 grc_mode;
11263
Michael Chanec41c7d2006-01-17 02:40:55 -080011264 ret = tg3_nvram_lock(tp);
11265 if (ret)
11266 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011267
Michael Chane6af3012005-04-21 17:12:05 -070011268 tg3_enable_nvram_access(tp);
11269 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11270 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011271 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011272
11273 grc_mode = tr32(GRC_MODE);
11274 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11275
11276 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11277 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11278
11279 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11280 buf);
 11281		} else {
11283 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11284 buf);
11285 }
11286
11287 grc_mode = tr32(GRC_MODE);
11288 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11289
Michael Chane6af3012005-04-21 17:12:05 -070011290 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011291 tg3_nvram_unlock(tp);
11292 }
11293
11294 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011295 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011296 udelay(40);
11297 }
11298
11299 return ret;
11300}
11301
11302struct subsys_tbl_ent {
11303 u16 subsys_vendor, subsys_devid;
11304 u32 phy_id;
11305};
11306
11307static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11308 /* Broadcom boards. */
11309 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11310 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11311 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11312 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11313 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11314 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11315 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11316 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11317 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11318 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11319 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11320
11321 /* 3com boards. */
11322 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11323 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11324 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11325 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11326 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11327
11328 /* DELL boards. */
11329 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11330 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11331 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11332 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11333
11334 /* Compaq boards. */
11335 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11336 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11337 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11338 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11339 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11340
11341 /* IBM boards. */
11342 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11343};
11344
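/* Match the board's PCI subsystem vendor/device IDs against the table
 * above; used as a last resort when NVRAM provides no usable PHY ID.
 */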
11345static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11346{
11347 int i;
11348
11349 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11350 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11351 tp->pdev->subsystem_vendor) &&
11352 (subsys_id_to_phy_id[i].subsys_devid ==
11353 tp->pdev->subsystem_device))
11354 return &subsys_id_to_phy_id[i];
11355 }
11356 return NULL;
11357}
11358
Michael Chan7d0c41e2005-04-21 17:06:20 -070011359static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011360{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011361 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011362 u16 pmcsr;
11363
11364 /* On some early chips the SRAM cannot be accessed in D3hot state,
 11365	 * so we need to make sure we're in D0.
11366 */
11367 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11368 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11369 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11370 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011371
11372 /* Make sure register accesses (indirect or otherwise)
11373 * will function correctly.
11374 */
11375 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11376 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011377
David S. Millerf49639e2006-06-09 11:58:36 -070011378 /* The memory arbiter has to be enabled in order for SRAM accesses
11379 * to succeed. Normally on powerup the tg3 chip firmware will make
11380 * sure it is enabled, but other entities such as system netboot
11381 * code might disable it.
11382 */
11383 val = tr32(MEMARB_MODE);
11384 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11385
Linus Torvalds1da177e2005-04-16 15:20:36 -070011386 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011387 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11388
Gary Zambranoa85feb82007-05-05 11:52:19 -070011389 /* Assume an onboard device and WOL capable by default. */
11390 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011391
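	/* The 5906 keeps its configuration in the VCPU shadow register
	 * rather than in the NIC SRAM config area, so it is handled here
	 * and skips the SRAM parsing below.
	 */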
Michael Chanb5d37722006-09-27 16:06:21 -070011392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011393 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011394 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011395 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11396 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011397 val = tr32(VCPU_CFGSHDW);
11398 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011399 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011400 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011401 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11402 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011403 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011404 goto done;
Michael Chanb5d37722006-09-27 16:06:21 -070011405 }
11406
Linus Torvalds1da177e2005-04-16 15:20:36 -070011407 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11408 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11409 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011410 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011411 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011412
11413 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11414 tp->nic_sram_data_cfg = nic_cfg;
11415
11416 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11417 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11419 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11420 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11421 (ver > 0) && (ver < 0x100))
11422 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11423
Matt Carlsona9daf362008-05-25 23:49:44 -070011424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11425 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11426
Linus Torvalds1da177e2005-04-16 15:20:36 -070011427 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11428 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11429 eeprom_phy_serdes = 1;
11430
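		/* NIC_SRAM_DATA_PHY_ID stores the PHY ID in two halves; fold
		 * them into the driver's internal PHY_ID layout (the same
		 * packing the PHY_ID_* constants and PHY_ID_MASK use).
		 */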
11431 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11432 if (nic_phy_id != 0) {
11433 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11434 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11435
11436 eeprom_phy_id = (id1 >> 16) << 10;
11437 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11438 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11439 } else
11440 eeprom_phy_id = 0;
11441
Michael Chan7d0c41e2005-04-21 17:06:20 -070011442 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011443 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011444 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011445 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11446 else
11447 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11448 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011449
John W. Linvillecbf46852005-04-21 17:01:29 -070011450 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011451 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11452 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011453 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011454 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11455
11456 switch (led_cfg) {
11457 default:
11458 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11459 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11460 break;
11461
11462 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11463 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11464 break;
11465
11466 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11467 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011468
11469 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
 11470			 * read from some older 5700/5701 bootcode.
11471 */
11472 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11473 ASIC_REV_5700 ||
11474 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11475 ASIC_REV_5701)
11476 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11477
Linus Torvalds1da177e2005-04-16 15:20:36 -070011478 break;
11479
11480 case SHASTA_EXT_LED_SHARED:
11481 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11482 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11483 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11484 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11485 LED_CTRL_MODE_PHY_2);
11486 break;
11487
11488 case SHASTA_EXT_LED_MAC:
11489 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11490 break;
11491
11492 case SHASTA_EXT_LED_COMBO:
11493 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11494 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11495 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11496 LED_CTRL_MODE_PHY_2);
11497 break;
11498
Stephen Hemminger855e1112008-04-16 16:37:28 -070011499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011500
11501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11503 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11504 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11505
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011506 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11507 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011508
Michael Chan9d26e212006-12-07 00:21:14 -080011509 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011510 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011511 if ((tp->pdev->subsystem_vendor ==
11512 PCI_VENDOR_ID_ARIMA) &&
11513 (tp->pdev->subsystem_device == 0x205a ||
11514 tp->pdev->subsystem_device == 0x2063))
11515 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11516 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011517 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011518 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11519 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011520
11521 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11522 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011523 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011524 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11525 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011526
11527 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11528 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011529 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011530
Gary Zambranoa85feb82007-05-05 11:52:19 -070011531 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11532 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11533 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011534
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011535 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011536 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
Matt Carlson0527ba32007-10-10 18:03:30 -070011537 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11538
Linus Torvalds1da177e2005-04-16 15:20:36 -070011539 if (cfg2 & (1 << 17))
11540 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11541
 11542		/* SerDes signal pre-emphasis in register 0x590 is set by
 11543		 * the bootcode if bit 18 is set. */
11544 if (cfg2 & (1 << 18))
11545 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011546
11547 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11548 u32 cfg3;
11549
11550 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11551 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11552 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11553 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011554
11555 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11556 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11557 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11558 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11559 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11560 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011561 }
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011562done:
11563 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11564 device_set_wakeup_enable(&tp->pdev->dev,
11565 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011566}
11567
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011568static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11569{
11570 int i;
11571 u32 val;
11572
11573 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11574 tw32(OTP_CTRL, cmd);
11575
11576 /* Wait for up to 1 ms for command to execute. */
11577 for (i = 0; i < 100; i++) {
11578 val = tr32(OTP_STATUS);
11579 if (val & OTP_STATUS_CMD_DONE)
11580 break;
11581 udelay(10);
11582 }
11583
11584 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11585}
11586
11587/* Read the gphy configuration from the OTP region of the chip. The gphy
11588 * configuration is a 32-bit value that straddles the alignment boundary.
11589 * We do two 32-bit reads and then shift and merge the results.
11590 */
11591static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11592{
11593 u32 bhalf_otp, thalf_otp;
11594
11595 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11596
11597 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11598 return 0;
11599
11600 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11601
11602 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11603 return 0;
11604
11605 thalf_otp = tr32(OTP_READ_DATA);
11606
11607 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11608
11609 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11610 return 0;
11611
11612 bhalf_otp = tr32(OTP_READ_DATA);
11613
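	/* Example: thalf_otp = 0xAAAABBBB and bhalf_otp = 0xCCCCDDDD give
	 * 0xBBBBCCCC -- the low half of the first word supplies the upper
	 * 16 bits and the high half of the second word the lower 16 bits
	 * of the straddling gphy config value.
	 */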
11614 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11615}
11616
Michael Chan7d0c41e2005-04-21 17:06:20 -070011617static int __devinit tg3_phy_probe(struct tg3 *tp)
11618{
11619 u32 hw_phy_id_1, hw_phy_id_2;
11620 u32 hw_phy_id, hw_phy_id_masked;
11621 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011622
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011623 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11624 return tg3_phy_init(tp);
11625
Linus Torvalds1da177e2005-04-16 15:20:36 -070011626 /* Reading the PHY ID register can conflict with ASF
 11627	 * firmware access to the PHY hardware.
11628 */
11629 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011630 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11631 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011632 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11633 } else {
11634 /* Now read the physical PHY_ID from the chip and verify
 11635		 * that it is sane. If it doesn't look good, we fall back
 11636		 * to the PHY_ID found in the eeprom area and, failing
 11637		 * that, to the hard-coded subsystem-ID table.
11638 */
11639 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11640 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11641
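		/* Pack MII_PHYSID1/2 into the same internal PHY_ID layout
		 * used by the NVRAM path in tg3_get_eeprom_hw_cfg() so the
		 * result can be masked with PHY_ID_MASK and checked against
		 * KNOWN_PHY_ID().
		 */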
11642 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11643 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11644 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11645
11646 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11647 }
11648
11649 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11650 tp->phy_id = hw_phy_id;
11651 if (hw_phy_id_masked == PHY_ID_BCM8002)
11652 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011653 else
11654 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011655 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011656 if (tp->phy_id != PHY_ID_INVALID) {
11657 /* Do nothing, phy ID already set up in
11658 * tg3_get_eeprom_hw_cfg().
11659 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011660 } else {
11661 struct subsys_tbl_ent *p;
11662
11663 /* No eeprom signature? Try the hardcoded
11664 * subsys device table.
11665 */
11666 p = lookup_by_subsys(tp);
11667 if (!p)
11668 return -ENODEV;
11669
11670 tp->phy_id = p->phy_id;
11671 if (!tp->phy_id ||
11672 tp->phy_id == PHY_ID_BCM8002)
11673 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11674 }
11675 }
11676
Michael Chan747e8f82005-07-25 12:33:22 -070011677 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011678 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011679 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011680 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011681
11682 tg3_readphy(tp, MII_BMSR, &bmsr);
11683 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11684 (bmsr & BMSR_LSTATUS))
11685 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011686
Linus Torvalds1da177e2005-04-16 15:20:36 -070011687 err = tg3_phy_reset(tp);
11688 if (err)
11689 return err;
11690
11691 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11692 ADVERTISE_100HALF | ADVERTISE_100FULL |
11693 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11694 tg3_ctrl = 0;
11695 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11696 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11697 MII_TG3_CTRL_ADV_1000_FULL);
11698 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11699 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11700 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11701 MII_TG3_CTRL_ENABLE_AS_MASTER);
11702 }
11703
Michael Chan3600d912006-12-07 00:21:48 -080011704 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11705 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11706 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11707 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011708 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11709
11710 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11711 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11712
11713 tg3_writephy(tp, MII_BMCR,
11714 BMCR_ANENABLE | BMCR_ANRESTART);
11715 }
11716 tg3_phy_set_wirespeed(tp);
11717
11718 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11719 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11720 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11721 }
11722
11723skip_phy_reset:
11724 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11725 err = tg3_init_5401phy_dsp(tp);
11726 if (err)
11727 return err;
11728 }
11729
11730 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11731 err = tg3_init_5401phy_dsp(tp);
11732 }
11733
Michael Chan747e8f82005-07-25 12:33:22 -070011734 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011735 tp->link_config.advertising =
11736 (ADVERTISED_1000baseT_Half |
11737 ADVERTISED_1000baseT_Full |
11738 ADVERTISED_Autoneg |
11739 ADVERTISED_FIBRE);
11740 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11741 tp->link_config.advertising &=
11742 ~(ADVERTISED_1000baseT_Half |
11743 ADVERTISED_1000baseT_Full);
11744
11745 return err;
11746}
11747
11748static void __devinit tg3_read_partno(struct tg3 *tp)
11749{
11750 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011751 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011752 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011753
Michael Chan18201802006-03-20 22:29:15 -080011754 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011755 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011756
Michael Chan18201802006-03-20 22:29:15 -080011757 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011758 for (i = 0; i < 256; i += 4) {
11759 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011760
Michael Chan1b277772006-03-20 22:27:48 -080011761 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11762 goto out_not_found;
11763
11764 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11765 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11766 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11767 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11768 }
11769 } else {
11770 int vpd_cap;
11771
11772 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11773 for (i = 0; i < 256; i += 4) {
11774 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011775 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011776 u16 tmp16;
11777
11778 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11779 i);
11780 while (j++ < 100) {
11781 pci_read_config_word(tp->pdev, vpd_cap +
11782 PCI_VPD_ADDR, &tmp16);
11783 if (tmp16 & 0x8000)
11784 break;
11785 msleep(1);
11786 }
David S. Millerf49639e2006-06-09 11:58:36 -070011787 if (!(tmp16 & 0x8000))
11788 goto out_not_found;
11789
Michael Chan1b277772006-03-20 22:27:48 -080011790 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11791 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011792 v = cpu_to_le32(tmp);
11793 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011794 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011795 }
11796
11797 /* Now parse and find the part number. */
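	/* The VPD data is a sequence of PCI resource descriptors: 0x82 is
	 * the large-resource identifier-string tag, 0x91 the read/write
	 * area and 0x90 the read-only area whose keyword records include
	 * 'PN' (part number), which is what gets copied out below.
	 */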
Michael Chanaf2c6a42006-11-07 14:57:51 -080011798 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011799 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011800 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011801
11802 if (val == 0x82 || val == 0x91) {
11803 i = (i + 3 +
11804 (vpd_data[i + 1] +
11805 (vpd_data[i + 2] << 8)));
11806 continue;
11807 }
11808
11809 if (val != 0x90)
11810 goto out_not_found;
11811
11812 block_end = (i + 3 +
11813 (vpd_data[i + 1] +
11814 (vpd_data[i + 2] << 8)));
11815 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011816
11817 if (block_end > 256)
11818 goto out_not_found;
11819
11820 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011821 if (vpd_data[i + 0] == 'P' &&
11822 vpd_data[i + 1] == 'N') {
11823 int partno_len = vpd_data[i + 2];
11824
Michael Chanaf2c6a42006-11-07 14:57:51 -080011825 i += 3;
11826 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011827 goto out_not_found;
11828
11829 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011830 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011831
11832 /* Success. */
11833 return;
11834 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011835 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011836 }
11837
11838 /* Part number not found. */
11839 goto out_not_found;
11840 }
11841
11842out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11844 strcpy(tp->board_part_number, "BCM95906");
11845 else
11846 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011847}
11848
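/* Sanity check an NVRAM firmware image header: the first word must carry
 * the 0x0c000000 signature in its top bits and the second word must be
 * zero.
 */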
Matt Carlson9c8a6202007-10-21 16:16:08 -070011849static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11850{
11851 u32 val;
11852
11853 if (tg3_nvram_read_swab(tp, offset, &val) ||
11854 (val & 0xfc000000) != 0x0c000000 ||
11855 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11856 val != 0)
11857 return 0;
11858
11859 return 1;
11860}
11861
Michael Chanc4e65752006-03-20 22:29:32 -080011862static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11863{
11864 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011865 u32 ver_offset;
11866 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011867
11868 if (tg3_nvram_read_swab(tp, 0, &val))
11869 return;
11870
11871 if (val != TG3_EEPROM_MAGIC)
11872 return;
11873
11874 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11875 tg3_nvram_read_swab(tp, 0x4, &start))
11876 return;
11877
11878 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011879
11880 if (!tg3_fw_img_is_valid(tp, offset) ||
11881 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011882 return;
11883
Matt Carlson9c8a6202007-10-21 16:16:08 -070011884 offset = offset + ver_offset - start;
11885 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011886 __le32 v;
11887 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011888 return;
11889
Al Virob9fc7dc2007-12-17 22:59:57 -080011890 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011891 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011892
11893 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011894 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011895 return;
11896
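	/* Walk the NVRAM directory looking for the ASF init-code entry so
	 * its firmware version can be appended to fw_ver below.
	 */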
11897 for (offset = TG3_NVM_DIR_START;
11898 offset < TG3_NVM_DIR_END;
11899 offset += TG3_NVM_DIRENT_SIZE) {
11900 if (tg3_nvram_read_swab(tp, offset, &val))
11901 return;
11902
11903 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11904 break;
11905 }
11906
11907 if (offset == TG3_NVM_DIR_END)
11908 return;
11909
11910 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11911 start = 0x08000000;
11912 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11913 return;
11914
11915 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11916 !tg3_fw_img_is_valid(tp, offset) ||
11917 tg3_nvram_read_swab(tp, offset + 8, &val))
11918 return;
11919
11920 offset += val - start;
11921
11922 bcnt = strlen(tp->fw_ver);
11923
11924 tp->fw_ver[bcnt++] = ',';
11925 tp->fw_ver[bcnt++] = ' ';
11926
11927 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011928 __le32 v;
11929 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011930 return;
11931
Al Virob9fc7dc2007-12-17 22:59:57 -080011932 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011933
Al Virob9fc7dc2007-12-17 22:59:57 -080011934 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11935 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011936 break;
11937 }
11938
Al Virob9fc7dc2007-12-17 22:59:57 -080011939 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11940 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011941 }
11942
11943 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011944}
11945
Michael Chan7544b092007-05-05 13:08:32 -070011946static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11947
Linus Torvalds1da177e2005-04-16 15:20:36 -070011948static int __devinit tg3_get_invariants(struct tg3 *tp)
11949{
11950 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011951 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11952 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011953 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11954 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011955 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11956 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011957 { },
11958 };
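	/* The host bridges listed above are known to reorder posted writes
	 * to the mailbox registers; pci_dev_present() is used further down
	 * to set TG3_FLAG_MBOX_WRITE_REORDER when one is found.
	 */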
11959 u32 misc_ctrl_reg;
11960 u32 cacheline_sz_reg;
11961 u32 pci_state_reg, grc_misc_cfg;
11962 u32 val;
11963 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011964 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011965
Linus Torvalds1da177e2005-04-16 15:20:36 -070011966 /* Force memory write invalidate off. If we leave it on,
11967 * then on 5700_BX chips we have to enable a workaround.
11968 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 11969	 * to match the cacheline size. The Broadcom driver has this
 11970	 * workaround but turns MWI off all the time, so it never uses
 11971	 * it. This seems to suggest that the workaround is insufficient.
11972 */
11973 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11974 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11975 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11976
11977 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11978 * has the register indirect write enable bit set before
11979 * we try to access any of the MMIO registers. It is also
11980 * critical that the PCI-X hw workaround situation is decided
11981 * before that as well.
11982 */
11983 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11984 &misc_ctrl_reg);
11985
11986 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11987 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011988 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11989 u32 prod_id_asic_rev;
11990
11991 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11992 &prod_id_asic_rev);
11993 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11994 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011995
Michael Chanff645be2005-04-21 17:09:53 -070011996 /* Wrong chip ID in 5752 A0. This code can be removed later
11997 * as A0 is not in production.
11998 */
11999 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12000 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12001
Michael Chan68929142005-08-09 20:17:14 -070012002 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12003 * we need to disable memory and use config. cycles
12004 * only to access all registers. The 5702/03 chips
12005 * can mistakenly decode the special cycles from the
12006 * ICH chipsets as memory write cycles, causing corruption
12007 * of register and memory space. Only certain ICH bridges
12008 * will drive special cycles with non-zero data during the
12009 * address phase which can fall within the 5703's address
12010 * range. This is not an ICH bug as the PCI spec allows
12011 * non-zero address during special cycles. However, only
12012 * these ICH bridges are known to drive non-zero addresses
12013 * during special cycles.
12014 *
12015 * Since special cycles do not cross PCI bridges, we only
12016 * enable this workaround if the 5703 is on the secondary
12017 * bus of these ICH bridges.
12018 */
12019 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12020 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12021 static struct tg3_dev_id {
12022 u32 vendor;
12023 u32 device;
12024 u32 rev;
12025 } ich_chipsets[] = {
12026 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12027 PCI_ANY_ID },
12028 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12029 PCI_ANY_ID },
12030 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12031 0xa },
12032 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12033 PCI_ANY_ID },
12034 { },
12035 };
12036 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12037 struct pci_dev *bridge = NULL;
12038
12039 while (pci_id->vendor != 0) {
12040 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12041 bridge);
12042 if (!bridge) {
12043 pci_id++;
12044 continue;
12045 }
12046 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070012047 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070012048 continue;
12049 }
12050 if (bridge->subordinate &&
12051 (bridge->subordinate->number ==
12052 tp->pdev->bus->number)) {
12053
12054 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12055 pci_dev_put(bridge);
12056 break;
12057 }
12058 }
12059 }
12060
Matt Carlson41588ba2008-04-19 18:12:33 -070012061	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12062 static struct tg3_dev_id {
12063 u32 vendor;
12064 u32 device;
12065 } bridge_chipsets[] = {
12066 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12067 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12068 { },
12069 };
12070 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12071 struct pci_dev *bridge = NULL;
12072
12073 while (pci_id->vendor != 0) {
12074 bridge = pci_get_device(pci_id->vendor,
12075 pci_id->device,
12076 bridge);
12077 if (!bridge) {
12078 pci_id++;
12079 continue;
12080 }
12081 if (bridge->subordinate &&
12082 (bridge->subordinate->number <=
12083 tp->pdev->bus->number) &&
12084 (bridge->subordinate->subordinate >=
12085 tp->pdev->bus->number)) {
12086 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12087 pci_dev_put(bridge);
12088 break;
12089 }
12090 }
12091 }
12092
Michael Chan4a29cc22006-03-19 13:21:12 -080012093 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
 12094	 * DMA addresses > 40-bit. This bridge may have additional
12095 * 57xx devices behind it in some 4-port NIC designs for example.
12096 * Any tg3 device found behind the bridge will also need the 40-bit
12097 * DMA workaround.
12098 */
Michael Chana4e2b342005-10-26 15:46:52 -070012099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12101 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012102 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012103 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
 12104	} else {
12106 struct pci_dev *bridge = NULL;
12107
12108 do {
12109 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12110 PCI_DEVICE_ID_SERVERWORKS_EPB,
12111 bridge);
12112 if (bridge && bridge->subordinate &&
12113 (bridge->subordinate->number <=
12114 tp->pdev->bus->number) &&
12115 (bridge->subordinate->subordinate >=
12116 tp->pdev->bus->number)) {
12117 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12118 pci_dev_put(bridge);
12119 break;
12120 }
12121 } while (bridge);
12122 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012123
Linus Torvalds1da177e2005-04-16 15:20:36 -070012124 /* Initialize misc host control in PCI block. */
12125 tp->misc_host_ctrl |= (misc_ctrl_reg &
12126 MISC_HOST_CTRL_CHIPREV);
12127 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12128 tp->misc_host_ctrl);
12129
12130 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12131 &cacheline_sz_reg);
12132
12133 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12134 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12135 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12136 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12137
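	/* The 5704 and 5714 are dual-port devices; remember the peer
	 * function so the MSI quirk checks below can tell whether a
	 * second port is actually present.
	 */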
Michael Chan7544b092007-05-05 13:08:32 -070012138 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12139 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12140 tp->pdev_peer = tg3_find_peer(tp);
12141
John W. Linville2052da92005-04-21 16:56:08 -070012142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012150 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012151 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12152
John W. Linville1b440c562005-04-21 17:03:18 -070012153 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12154 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12155 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12156
Michael Chan5a6f3072006-03-20 22:28:05 -080012157 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012158 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12159 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12160 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12161 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12162 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12163 tp->pdev_peer == tp->pdev))
12164 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12165
Michael Chanaf36e6b2006-03-23 01:28:06 -080012166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012172 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012173 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012174 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012175 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012176 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12177 ASIC_REV_5750 &&
12178 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012179 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012180 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012182
Matt Carlsonf51f3562008-05-25 23:45:08 -070012183 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12184 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012185 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12186
Michael Chanc7835a72006-11-15 21:14:42 -080012187 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12188 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012189 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012190
12191 pcie_set_readrq(tp->pdev, 4096);
12192
Michael Chanc7835a72006-11-15 21:14:42 -080012193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12194 u16 lnkctl;
12195
12196 pci_read_config_word(tp->pdev,
12197 pcie_cap + PCI_EXP_LNKCTL,
12198 &lnkctl);
12199 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12200 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12201 }
Matt Carlsonfcb389d2008-11-03 16:55:44 -080012202 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12203 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012204
Michael Chan399de502005-10-03 14:02:39 -070012205 /* If we have an AMD 762 or VIA K8T800 chipset, write
12206 * reordering to the mailbox registers done by the host
12207 * controller can cause major troubles. We read back from
12208 * every mailbox register write to force the writes to be
12209 * posted to the chip in order.
12210 */
12211 if (pci_dev_present(write_reorder_chipsets) &&
12212 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12213 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12214
Linus Torvalds1da177e2005-04-16 15:20:36 -070012215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12216 tp->pci_lat_timer < 64) {
12217 tp->pci_lat_timer = 64;
12218
12219 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12220 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12221 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12222 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12223
12224 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12225 cacheline_sz_reg);
12226 }
12227
Matt Carlson9974a352007-10-07 23:27:28 -070012228 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12229 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12230 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12231 if (!tp->pcix_cap) {
12232 printk(KERN_ERR PFX "Cannot find PCI-X "
12233 "capability, aborting.\n");
12234 return -EIO;
12235 }
12236 }
12237
Linus Torvalds1da177e2005-04-16 15:20:36 -070012238 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12239 &pci_state_reg);
12240
Matt Carlson9974a352007-10-07 23:27:28 -070012241 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012242 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12243
12244 /* If this is a 5700 BX chipset, and we are in PCI-X
12245 * mode, enable register write workaround.
12246 *
12247 * The workaround is to use indirect register accesses
 12248		 * for all chip writes except those to mailbox registers.
12249 */
12250 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12251 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012252
12253 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12254
 12255			/* The chip can have its power management PCI config
12256 * space registers clobbered due to this bug.
12257 * So explicitly force the chip into D0 here.
12258 */
Matt Carlson9974a352007-10-07 23:27:28 -070012259 pci_read_config_dword(tp->pdev,
12260 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012261 &pm_reg);
12262 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12263 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012264 pci_write_config_dword(tp->pdev,
12265 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012266 pm_reg);
12267
12268 /* Also, force SERR#/PERR# in PCI command. */
12269 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12270 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12271 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12272 }
12273 }
12274
Michael Chan087fe252005-08-09 20:17:41 -070012275 /* 5700 BX chips need to have their TX producer index mailboxes
 12276	 * written twice to work around a bug.
12277 */
12278 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12279 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12280
Linus Torvalds1da177e2005-04-16 15:20:36 -070012281 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12282 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12283 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12284 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12285
12286 /* Chip-specific fixup from Broadcom driver */
12287 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12288 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12289 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12290 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12291 }
12292
Michael Chan1ee582d2005-08-09 20:16:46 -070012293 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012294 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012295 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012296 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012297 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012298 tp->write32_tx_mbox = tg3_write32;
12299 tp->write32_rx_mbox = tg3_write32;
12300
12301 /* Various workaround register access methods */
12302 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12303 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012304 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12305 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12306 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12307 /*
12308 * Back to back register writes can cause problems on these
12309 * chips, the workaround is to read back all reg writes
12310 * except those to mailbox regs.
12311 *
12312 * See tg3_write_indirect_reg32().
12313 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012314 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012315 }
12316
Michael Chan1ee582d2005-08-09 20:16:46 -070012317
12318 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12319 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12320 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12321 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12322 tp->write32_rx_mbox = tg3_write_flush_reg32;
12323 }
Michael Chan20094932005-08-09 20:16:32 -070012324
Michael Chan68929142005-08-09 20:17:14 -070012325 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12326 tp->read32 = tg3_read_indirect_reg32;
12327 tp->write32 = tg3_write_indirect_reg32;
12328 tp->read32_mbox = tg3_read_indirect_mbox;
12329 tp->write32_mbox = tg3_write_indirect_mbox;
12330 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12331 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12332
12333 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012334 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012335
12336 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12337 pci_cmd &= ~PCI_COMMAND_MEMORY;
12338 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12339 }
Michael Chanb5d37722006-09-27 16:06:21 -070012340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12341 tp->read32_mbox = tg3_read32_mbox_5906;
12342 tp->write32_mbox = tg3_write32_mbox_5906;
12343 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12344 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12345 }
Michael Chan68929142005-08-09 20:17:14 -070012346
Michael Chanbbadf502006-04-06 21:46:34 -070012347 if (tp->write32 == tg3_write_indirect_reg32 ||
12348 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12349 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012350 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012351 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12352
Michael Chan7d0c41e2005-04-21 17:06:20 -070012353 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012354 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012355 * determined before calling tg3_set_power_state() so that
12356 * we know whether or not to switch out of Vaux power.
12357 * When the flag is set, it means that GPIO1 is used for eeprom
12358 * write protect and also implies that it is a LOM where GPIOs
12359 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012360 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012361 tg3_get_eeprom_hw_cfg(tp);
12362
Matt Carlson0d3031d2007-10-10 18:02:43 -070012363 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12364 /* Allow reads and writes to the
12365 * APE register and memory space.
12366 */
12367 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12368 PCISTATE_ALLOW_APE_SHMEM_WR;
12369 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12370 pci_state_reg);
12371 }
12372
Matt Carlson9936bcf2007-10-10 18:03:07 -070012373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012374 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012376 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12377
Michael Chan314fba32005-04-21 17:07:04 -070012378 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12379 * GPIO1 driven high will bring 5700's external PHY out of reset.
12380 * It is also used as eeprom write protect on LOMs.
12381 */
12382 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12383 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12384 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12385 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12386 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012387 /* Unused GPIO3 must be driven as output on 5752 because there
12388 * are no pull-up resistors on unused GPIO pins.
12389 */
12390 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12391 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012392
Michael Chanaf36e6b2006-03-23 01:28:06 -080012393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12394 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12395
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012396 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12397 /* Turn off the debug UART. */
12398 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12399 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12400 /* Keep VMain power. */
12401 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12402 GRC_LCLCTRL_GPIO_OUTPUT0;
12403 }
12404
Linus Torvalds1da177e2005-04-16 15:20:36 -070012405 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012406 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012407 if (err) {
12408 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12409 pci_name(tp->pdev));
12410 return err;
12411 }
12412
12413 /* 5700 B0 chips do not support checksumming correctly due
12414 * to hardware bugs.
12415 */
12416 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12417 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12418
Linus Torvalds1da177e2005-04-16 15:20:36 -070012419 /* Derive initial jumbo mode from MTU assigned in
12420 * ether_setup() via the alloc_etherdev() call
12421 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012422 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012423 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012424 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012425
12426 /* Determine WakeOnLan speed to use. */
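	/* Early 5700 and 5701 A0/B0/B2 steppings are presumably limited to
	 * a 10Mb wake-up link, so TG3_FLAG_WOL_SPEED_100MB is left clear
	 * for them; everything else advertises 100Mb WoL speed.
	 */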
12427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12428 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12429 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12430 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12431 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12432 } else {
12433 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12434 }
12435
12436 /* A few boards don't want Ethernet@WireSpeed phy feature */
12437 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12438 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12439 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012440 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012441 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012442 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012443 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12444
12445 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12446 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12447 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12448 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12449 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12450
Michael Chanc424cb22006-04-29 18:56:34 -070012451 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012453 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012456 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12457 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12458 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012459 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12460 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012461 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12462 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012463 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012465
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12467 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12468 tp->phy_otp = tg3_read_otp_phycfg(tp);
12469 if (tp->phy_otp == 0)
12470 tp->phy_otp = TG3_OTP_DEFAULT;
12471 }
12472
Matt Carlsonf51f3562008-05-25 23:45:08 -070012473 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012474 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12475 else
12476 tp->mi_mode = MAC_MI_MODE_BASE;
12477
Linus Torvalds1da177e2005-04-16 15:20:36 -070012478 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012479 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12480 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12481 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12482
Matt Carlson57e69832008-05-25 23:48:31 -070012483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12484 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12485
Matt Carlson158d7ab2008-05-29 01:37:54 -070012486 err = tg3_mdio_init(tp);
12487 if (err)
12488 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012489
12490 /* Initialize data/descriptor byte/word swapping. */
12491 val = tr32(GRC_MODE);
12492 val &= GRC_MODE_HOST_STACKUP;
12493 tw32(GRC_MODE, val | tp->grc_mode);
12494
12495 tg3_switch_clocks(tp);
12496
12497 /* Clear this out for sanity. */
12498 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12499
12500 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12501 &pci_state_reg);
12502 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12503 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12504 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12505
12506 if (chiprevid == CHIPREV_ID_5701_A0 ||
12507 chiprevid == CHIPREV_ID_5701_B0 ||
12508 chiprevid == CHIPREV_ID_5701_B2 ||
12509 chiprevid == CHIPREV_ID_5701_B5) {
12510 void __iomem *sram_base;
12511
12512 /* Write some dummy words into the SRAM status block
 12513			 * area, and see if it reads back correctly. If the return
12514 * value is bad, force enable the PCIX workaround.
12515 */
12516 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12517
12518 writel(0x00000000, sram_base);
12519 writel(0x00000000, sram_base + 4);
12520 writel(0xffffffff, sram_base + 4);
12521 if (readl(sram_base) != 0x00000000)
12522 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12523 }
12524 }
12525
12526 udelay(50);
12527 tg3_nvram_init(tp);
12528
12529 grc_misc_cfg = tr32(GRC_MISC_CFG);
12530 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12531
Linus Torvalds1da177e2005-04-16 15:20:36 -070012532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12533 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12534 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12535 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12536
David S. Millerfac9b832005-05-18 22:46:34 -070012537 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12538 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12539 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12540 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12541 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12542 HOSTCC_MODE_CLRTICK_TXBD);
12543
12544 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12545 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12546 tp->misc_host_ctrl);
12547 }
12548
Matt Carlson3bda1252008-08-15 14:08:22 -070012549 /* Preserve the APE MAC_MODE bits */
12550 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12551 tp->mac_mode = tr32(MAC_MODE) |
12552 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12553 else
12554 tp->mac_mode = TG3_DEF_MAC_MODE;
12555
Linus Torvalds1da177e2005-04-16 15:20:36 -070012556 /* these are limited to 10/100 only */
12557 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12558 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12559 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12560 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12561 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12562 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12563 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12564 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12565 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012566 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12567 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012569 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12570
12571 err = tg3_phy_probe(tp);
12572 if (err) {
12573 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12574 pci_name(tp->pdev), err);
12575 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012576 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012577 }
12578
12579 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012580 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012581
12582 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12583 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12584 } else {
12585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12586 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12587 else
12588 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12589 }
12590
12591 /* 5700 {AX,BX} chips have a broken status block link
12592 * change bit implementation, so we must use the
12593 * status register in those cases.
12594 */
12595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12596 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12597 else
12598 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12599
12600	/* The led_ctrl is set during tg3_phy_probe; here we might
12601 * have to force the link status polling mechanism based
12602 * upon subsystem IDs.
12603 */
12604 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012605 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012606 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12607 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12608 TG3_FLAG_USE_LINKCHG_REG);
12609 }
12610
12611 /* For all SERDES we poll the MAC status register. */
12612 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12613 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12614 else
12615 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12616
Michael Chan5a6f3072006-03-20 22:28:05 -080012617	/* Older chips can get confused if a TX buffer straddles the 4GB
Linus Torvalds1da177e2005-04-16 15:20:36 -070012618	 * address boundary; the chips listed below are not affected.
12619	 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080012620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080012626 tp->dev->hard_start_xmit = tg3_start_xmit;
12627 else
12628 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012629
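	/* Reserve two bytes in front of received frames so that the
	 * 14-byte Ethernet header leaves the IP header 32-bit aligned.
	 * The 5701 in PCI-X mode cannot use the unaligned start
	 * (apparently a hardware restriction), so it keeps offset 0.
	 */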
12630 tp->rx_offset = 2;
12631 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12632 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12633 tp->rx_offset = 0;
12634
Michael Chanf92905d2006-06-29 20:14:29 -070012635 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12636
12637 /* Increment the rx prod index on the rx std ring by at most
12638	 * 8 for these chips to work around a hw erratum.
12639 */
12640 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12643 tp->rx_std_max_post = 8;
12644
Matt Carlson8ed5d972007-05-07 00:25:49 -070012645 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12646 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12647 PCIE_PWR_MGMT_L1_THRESH_MSK;
12648
Linus Torvalds1da177e2005-04-16 15:20:36 -070012649 return err;
12650}
12651
David S. Miller49b6e95f2007-03-29 01:38:42 -070012652#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012653static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12654{
12655 struct net_device *dev = tp->dev;
12656 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012657 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012658 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012659 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012660
David S. Miller49b6e95f2007-03-29 01:38:42 -070012661 addr = of_get_property(dp, "local-mac-address", &len);
12662 if (addr && len == 6) {
12663 memcpy(dev->dev_addr, addr, 6);
12664 memcpy(dev->perm_addr, dev->dev_addr, 6);
12665 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012666 }
12667 return -ENODEV;
12668}
12669
12670static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12671{
12672 struct net_device *dev = tp->dev;
12673
12674 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012675 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012676 return 0;
12677}
12678#endif
12679
12680static int __devinit tg3_get_device_address(struct tg3 *tp)
12681{
12682 struct net_device *dev = tp->dev;
12683 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012684 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012685
David S. Miller49b6e95f2007-03-29 01:38:42 -070012686#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012687 if (!tg3_get_macaddr_sparc(tp))
12688 return 0;
12689#endif
12690
12691 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012692 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012693 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012694 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12695 mac_offset = 0xcc;
12696 if (tg3_nvram_lock(tp))
12697 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12698 else
12699 tg3_nvram_unlock(tp);
12700 }
Michael Chanb5d37722006-09-27 16:06:21 -070012701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12702 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012703
12704 /* First try to get it from MAC address mailbox. */
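	/* A valid mailbox entry starts with the ASCII signature "HK"
	 * (0x484b) in the upper 16 bits of the high word.
	 */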
12705 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12706 if ((hi >> 16) == 0x484b) {
12707 dev->dev_addr[0] = (hi >> 8) & 0xff;
12708 dev->dev_addr[1] = (hi >> 0) & 0xff;
12709
12710 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12711 dev->dev_addr[2] = (lo >> 24) & 0xff;
12712 dev->dev_addr[3] = (lo >> 16) & 0xff;
12713 dev->dev_addr[4] = (lo >> 8) & 0xff;
12714 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012715
Michael Chan008652b2006-03-27 23:14:53 -080012716 /* Some old bootcode may report a 0 MAC address in SRAM */
12717 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12718 }
12719 if (!addr_ok) {
12720 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012721 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012722 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12723 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12724 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12725 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12726 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12727 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12728 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12729 }
12730 /* Finally just fetch it out of the MAC control regs. */
12731 else {
12732 hi = tr32(MAC_ADDR_0_HIGH);
12733 lo = tr32(MAC_ADDR_0_LOW);
12734
12735 dev->dev_addr[5] = lo & 0xff;
12736 dev->dev_addr[4] = (lo >> 8) & 0xff;
12737 dev->dev_addr[3] = (lo >> 16) & 0xff;
12738 dev->dev_addr[2] = (lo >> 24) & 0xff;
12739 dev->dev_addr[1] = hi & 0xff;
12740 dev->dev_addr[0] = (hi >> 8) & 0xff;
12741 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012742 }
12743
12744 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012745#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012746 if (!tg3_get_default_macaddr_sparc(tp))
12747 return 0;
12748#endif
12749 return -EINVAL;
12750 }
John W. Linville2ff43692005-09-12 14:44:20 -070012751 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012752 return 0;
12753}
12754
David S. Miller59e6b432005-05-18 22:50:10 -070012755#define BOUNDARY_SINGLE_CACHELINE 1
12756#define BOUNDARY_MULTI_CACHELINE 2
12757
12758static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12759{
12760 int cacheline_size;
12761 u8 byte;
12762 int goal;
12763
12764 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12765 if (byte == 0)
12766 cacheline_size = 1024;
12767 else
12768 cacheline_size = (int) byte * 4;
12769
12770 /* On 5703 and later chips, the boundary bits have no
12771 * effect.
12772 */
12773 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12774 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12775 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12776 goto out;
12777
12778#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12779 goal = BOUNDARY_MULTI_CACHELINE;
12780#else
12781#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12782 goal = BOUNDARY_SINGLE_CACHELINE;
12783#else
12784 goal = 0;
12785#endif
12786#endif
12787
12788 if (!goal)
12789 goto out;
12790
12791 /* PCI controllers on most RISC systems tend to disconnect
12792 * when a device tries to burst across a cache-line boundary.
12793 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12794 *
12795 * Unfortunately, for PCI-E there are only limited
12796 * write-side controls for this, and thus for reads
12797 * we will still get the disconnects. We'll also waste
12798 * these PCI cycles for both read and write for chips
12799 * other than 5700 and 5701 which do not implement the
12800 * boundary bits.
12801 */
12802 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12803 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12804 switch (cacheline_size) {
12805 case 16:
12806 case 32:
12807 case 64:
12808 case 128:
12809 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12810 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12811 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12812 } else {
12813 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12814 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12815 }
12816 break;
12817
12818 case 256:
12819 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12820 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12821 break;
12822
12823 default:
12824 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12825 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12826 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012827 }
David S. Miller59e6b432005-05-18 22:50:10 -070012828 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12829 switch (cacheline_size) {
12830 case 16:
12831 case 32:
12832 case 64:
12833 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12834 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12835 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12836 break;
12837 }
12838 /* fallthrough */
12839 case 128:
12840 default:
12841 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12842 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12843 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012844 }
David S. Miller59e6b432005-05-18 22:50:10 -070012845 } else {
12846 switch (cacheline_size) {
12847 case 16:
12848 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12849 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12850 DMA_RWCTRL_WRITE_BNDRY_16);
12851 break;
12852 }
12853 /* fallthrough */
12854 case 32:
12855 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12856 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12857 DMA_RWCTRL_WRITE_BNDRY_32);
12858 break;
12859 }
12860 /* fallthrough */
12861 case 64:
12862 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12863 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12864 DMA_RWCTRL_WRITE_BNDRY_64);
12865 break;
12866 }
12867 /* fallthrough */
12868 case 128:
12869 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12870 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12871 DMA_RWCTRL_WRITE_BNDRY_128);
12872 break;
12873 }
12874 /* fallthrough */
12875 case 256:
12876 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12877 DMA_RWCTRL_WRITE_BNDRY_256);
12878 break;
12879 case 512:
12880 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12881 DMA_RWCTRL_WRITE_BNDRY_512);
12882 break;
12883 case 1024:
12884 default:
12885 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12886 DMA_RWCTRL_WRITE_BNDRY_1024);
12887 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012888 }
David S. Miller59e6b432005-05-18 22:50:10 -070012889 }
12890
12891out:
12892 return val;
12893}
12894
Linus Torvalds1da177e2005-04-16 15:20:36 -070012895static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12896{
12897 struct tg3_internal_buffer_desc test_desc;
12898 u32 sram_dma_descs;
12899 int i, ret;
12900
12901 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12902
12903 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12904 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12905 tw32(RDMAC_STATUS, 0);
12906 tw32(WDMAC_STATUS, 0);
12907
12908 tw32(BUFMGR_MODE, 0);
12909 tw32(FTQ_RESET, 0);
12910
12911 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12912 test_desc.addr_lo = buf_dma & 0xffffffff;
12913 test_desc.nic_mbuf = 0x00002100;
12914 test_desc.len = size;
12915
12916 /*
12917	 * HP ZX1 systems were seeing test failures with 5701 cards running
12918	 * at 33MHz the *second* time the tg3 driver was loaded after an
12919	 * initial scan.
12920 *
12921 * Broadcom tells me:
12922 * ...the DMA engine is connected to the GRC block and a DMA
12923 * reset may affect the GRC block in some unpredictable way...
12924 * The behavior of resets to individual blocks has not been tested.
12925 *
12926 * Broadcom noted the GRC reset will also reset all sub-components.
12927 */
12928 if (to_device) {
12929 test_desc.cqid_sqid = (13 << 8) | 2;
12930
12931 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12932 udelay(40);
12933 } else {
12934 test_desc.cqid_sqid = (16 << 8) | 7;
12935
12936 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12937 udelay(40);
12938 }
12939 test_desc.flags = 0x00000005;
12940
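	/* Copy the test descriptor into NIC SRAM one 32-bit word at a
	 * time through the PCI memory window, then restore the window
	 * base address.
	 */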
12941 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12942 u32 val;
12943
12944 val = *(((u32 *)&test_desc) + i);
12945 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12946 sram_dma_descs + (i * sizeof(u32)));
12947 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12948 }
12949 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12950
12951 if (to_device) {
12952 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12953 } else {
12954 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12955 }
12956
12957 ret = -ENODEV;
12958 for (i = 0; i < 40; i++) {
12959 u32 val;
12960
12961 if (to_device)
12962 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12963 else
12964 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12965 if ((val & 0xffff) == sram_dma_descs) {
12966 ret = 0;
12967 break;
12968 }
12969
12970 udelay(100);
12971 }
12972
12973 return ret;
12974}
12975
David S. Millerded73402005-05-23 13:59:47 -070012976#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012977
12978static int __devinit tg3_test_dma(struct tg3 *tp)
12979{
12980 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012981 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012982 int ret;
12983
12984 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12985 if (!buf) {
12986 ret = -ENOMEM;
12987 goto out_nofree;
12988 }
12989
12990 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12991 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12992
David S. Miller59e6b432005-05-18 22:50:10 -070012993 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012994
12995 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12996 /* DMA read watermark not used on PCIE */
12997 tp->dma_rwctrl |= 0x00180000;
12998 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013001 tp->dma_rwctrl |= 0x003f0000;
13002 else
13003 tp->dma_rwctrl |= 0x003f000f;
13004 } else {
13005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13007 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080013008 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013009
Michael Chan4a29cc22006-03-19 13:21:12 -080013010 /* If the 5704 is behind the EPB bridge, we can
13011 * do the less restrictive ONE_DMA workaround for
13012 * better performance.
13013 */
13014 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13016 tp->dma_rwctrl |= 0x8000;
13017 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013018 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13019
Michael Chan49afdeb2007-02-13 12:17:03 -080013020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13021 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070013022 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080013023 tp->dma_rwctrl |=
13024 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13025 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13026 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070013027 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13028 /* 5780 always in PCIX mode */
13029 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070013030 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13031 /* 5714 always in PCIX mode */
13032 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013033 } else {
13034 tp->dma_rwctrl |= 0x001b000f;
13035 }
13036 }
13037
13038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13040 tp->dma_rwctrl &= 0xfffffff0;
13041
13042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13044 /* Remove this if it causes problems for some boards. */
13045 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13046
13047 /* On 5700/5701 chips, we need to set this bit.
13048 * Otherwise the chip will issue cacheline transactions
13049	 * to streamable DMA memory without all of the byte
13050	 * enables asserted.  This is an error on several
13051 * RISC PCI controllers, in particular sparc64.
13052 *
13053 * On 5703/5704 chips, this bit has been reassigned
13054 * a different meaning. In particular, it is used
13055 * on those chips to enable a PCI-X workaround.
13056 */
13057 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13058 }
13059
13060 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13061
13062#if 0
13063 /* Unneeded, already done by tg3_get_invariants. */
13064 tg3_switch_clocks(tp);
13065#endif
13066
13067 ret = 0;
13068 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13069 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13070 goto out;
13071
David S. Miller59e6b432005-05-18 22:50:10 -070013072	/* It is best to perform the DMA test with the maximum write burst size
13073 * to expose the 5700/5701 write DMA bug.
13074 */
13075 saved_dma_rwctrl = tp->dma_rwctrl;
13076 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13077 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13078
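	/* Write a counting pattern to the chip and read it back.  If the
	 * readback is corrupted, drop the write boundary to 16 bytes and
	 * retry; a second corruption with the 16-byte boundary already in
	 * place is treated as a fatal DMA failure.
	 */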
Linus Torvalds1da177e2005-04-16 15:20:36 -070013079 while (1) {
13080 u32 *p = buf, i;
13081
13082 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13083 p[i] = i;
13084
13085 /* Send the buffer to the chip. */
13086 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13087 if (ret) {
13088			printk(KERN_ERR "tg3_test_dma() write to test buffer failed, err %d\n", ret);
13089 break;
13090 }
13091
13092#if 0
13093 /* validate data reached card RAM correctly. */
13094 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13095 u32 val;
13096 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13097 if (le32_to_cpu(val) != p[i]) {
13098 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13099 /* ret = -ENODEV here? */
13100 }
13101 p[i] = 0;
13102 }
13103#endif
13104 /* Now read it back. */
13105 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13106 if (ret) {
13107			printk(KERN_ERR "tg3_test_dma() read of test buffer failed, err %d\n", ret);
13108
13109 break;
13110 }
13111
13112 /* Verify it. */
13113 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13114 if (p[i] == i)
13115 continue;
13116
David S. Miller59e6b432005-05-18 22:50:10 -070013117 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13118 DMA_RWCTRL_WRITE_BNDRY_16) {
13119 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013120 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13121 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13122 break;
13123 } else {
13124 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13125 ret = -ENODEV;
13126 goto out;
13127 }
13128 }
13129
13130 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13131 /* Success. */
13132 ret = 0;
13133 break;
13134 }
13135 }
David S. Miller59e6b432005-05-18 22:50:10 -070013136 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13137 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013138 static struct pci_device_id dma_wait_state_chipsets[] = {
13139 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13140 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13141 { },
13142 };
13143
David S. Miller59e6b432005-05-18 22:50:10 -070013144 /* DMA test passed without adjusting DMA boundary,
Michael Chan6d1cfba2005-06-08 14:13:14 -070013145 * now look for chipsets that are known to expose the
13146 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013147 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013148 if (pci_dev_present(dma_wait_state_chipsets)) {
13149 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13150 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13151 }
13152 else
13153 /* Safe to use the calculated DMA boundary. */
13154 tp->dma_rwctrl = saved_dma_rwctrl;
13155
David S. Miller59e6b432005-05-18 22:50:10 -070013156 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013158
13159out:
13160 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13161out_nofree:
13162 return ret;
13163}
13164
13165static void __devinit tg3_init_link_config(struct tg3 *tp)
13166{
13167 tp->link_config.advertising =
13168 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13169 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13170 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13171 ADVERTISED_Autoneg | ADVERTISED_MII);
13172 tp->link_config.speed = SPEED_INVALID;
13173 tp->link_config.duplex = DUPLEX_INVALID;
13174 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013175 tp->link_config.active_speed = SPEED_INVALID;
13176 tp->link_config.active_duplex = DUPLEX_INVALID;
13177 tp->link_config.phy_is_low_power = 0;
13178 tp->link_config.orig_speed = SPEED_INVALID;
13179 tp->link_config.orig_duplex = DUPLEX_INVALID;
13180 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13181}
13182
13183static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13184{
Michael Chanfdfec172005-07-25 12:31:48 -070013185 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13186 tp->bufmgr_config.mbuf_read_dma_low_water =
13187 DEFAULT_MB_RDMA_LOW_WATER_5705;
13188 tp->bufmgr_config.mbuf_mac_rx_low_water =
13189 DEFAULT_MB_MACRX_LOW_WATER_5705;
13190 tp->bufmgr_config.mbuf_high_water =
13191 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13193 tp->bufmgr_config.mbuf_mac_rx_low_water =
13194 DEFAULT_MB_MACRX_LOW_WATER_5906;
13195 tp->bufmgr_config.mbuf_high_water =
13196 DEFAULT_MB_HIGH_WATER_5906;
13197 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013198
Michael Chanfdfec172005-07-25 12:31:48 -070013199 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13200 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13201 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13202 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13203 tp->bufmgr_config.mbuf_high_water_jumbo =
13204 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13205 } else {
13206 tp->bufmgr_config.mbuf_read_dma_low_water =
13207 DEFAULT_MB_RDMA_LOW_WATER;
13208 tp->bufmgr_config.mbuf_mac_rx_low_water =
13209 DEFAULT_MB_MACRX_LOW_WATER;
13210 tp->bufmgr_config.mbuf_high_water =
13211 DEFAULT_MB_HIGH_WATER;
13212
13213 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13214 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13215 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13216 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13217 tp->bufmgr_config.mbuf_high_water_jumbo =
13218 DEFAULT_MB_HIGH_WATER_JUMBO;
13219 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013220
13221 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13222 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13223}
13224
13225static char * __devinit tg3_phy_string(struct tg3 *tp)
13226{
13227 switch (tp->phy_id & PHY_ID_MASK) {
13228 case PHY_ID_BCM5400: return "5400";
13229 case PHY_ID_BCM5401: return "5401";
13230 case PHY_ID_BCM5411: return "5411";
13231 case PHY_ID_BCM5701: return "5701";
13232 case PHY_ID_BCM5703: return "5703";
13233 case PHY_ID_BCM5704: return "5704";
13234 case PHY_ID_BCM5705: return "5705";
13235 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013236 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013237 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013238 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013239 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013240 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013241 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013242 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013243 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013244 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013245 case PHY_ID_BCM8002: return "8002/serdes";
13246 case 0: return "serdes";
13247 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013249}
13250
Michael Chanf9804dd2005-09-27 12:13:10 -070013251static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13252{
13253 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13254 strcpy(str, "PCI Express");
13255 return str;
13256 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13257 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13258
13259 strcpy(str, "PCIX:");
13260
13261 if ((clock_ctrl == 7) ||
13262 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13263 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13264 strcat(str, "133MHz");
13265 else if (clock_ctrl == 0)
13266 strcat(str, "33MHz");
13267 else if (clock_ctrl == 2)
13268 strcat(str, "50MHz");
13269 else if (clock_ctrl == 4)
13270 strcat(str, "66MHz");
13271 else if (clock_ctrl == 6)
13272 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013273 } else {
13274 strcpy(str, "PCI:");
13275 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13276 strcat(str, "66MHz");
13277 else
13278 strcat(str, "33MHz");
13279 }
13280 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13281 strcat(str, ":32-bit");
13282 else
13283 strcat(str, ":64-bit");
13284 return str;
13285}
13286
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013287static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013288{
13289 struct pci_dev *peer;
13290 unsigned int func, devnr = tp->pdev->devfn & ~7;
13291
13292 for (func = 0; func < 8; func++) {
13293 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13294 if (peer && peer != tp->pdev)
13295 break;
13296 pci_dev_put(peer);
13297 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013298	/* The 5704 can be configured in single-port mode; set peer to
13299 * tp->pdev in that case.
13300 */
13301 if (!peer) {
13302 peer = tp->pdev;
13303 return peer;
13304 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013305
13306 /*
13307 * We don't need to keep the refcount elevated; there's no way
13308 * to remove one half of this device without removing the other
13309 */
13310 pci_dev_put(peer);
13311
13312 return peer;
13313}
13314
David S. Miller15f98502005-05-18 22:49:26 -070013315static void __devinit tg3_init_coal(struct tg3 *tp)
13316{
13317 struct ethtool_coalesce *ec = &tp->coal;
13318
13319 memset(ec, 0, sizeof(*ec));
13320 ec->cmd = ETHTOOL_GCOALESCE;
13321 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13322 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13323 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13324 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13325 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13326 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13327 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13328 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13329 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13330
13331 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13332 HOSTCC_MODE_CLRTICK_TXBD)) {
13333 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13334 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13335 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13336 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13337 }
Michael Chand244c892005-07-05 14:42:33 -070013338
13339 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13340 ec->rx_coalesce_usecs_irq = 0;
13341 ec->tx_coalesce_usecs_irq = 0;
13342 ec->stats_block_coalesce_usecs = 0;
13343 }
David S. Miller15f98502005-05-18 22:49:26 -070013344}
13345
Linus Torvalds1da177e2005-04-16 15:20:36 -070013346static int __devinit tg3_init_one(struct pci_dev *pdev,
13347 const struct pci_device_id *ent)
13348{
13349 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013350 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013351 struct net_device *dev;
13352 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013353 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013354 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013355 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013356
13357 if (tg3_version_printed++ == 0)
13358 printk(KERN_INFO "%s", version);
13359
13360 err = pci_enable_device(pdev);
13361 if (err) {
13362 printk(KERN_ERR PFX "Cannot enable PCI device, "
13363 "aborting.\n");
13364 return err;
13365 }
13366
Matt Carlson63532392008-11-03 16:49:57 -080013367 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013368 printk(KERN_ERR PFX "Cannot find proper PCI device "
13369 "base address, aborting.\n");
13370 err = -ENODEV;
13371 goto err_out_disable_pdev;
13372 }
13373
13374 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13375 if (err) {
13376 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13377 "aborting.\n");
13378 goto err_out_disable_pdev;
13379 }
13380
13381 pci_set_master(pdev);
13382
13383 /* Find power-management capability. */
13384 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13385 if (pm_cap == 0) {
13386 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13387 "aborting.\n");
13388 err = -EIO;
13389 goto err_out_free_res;
13390 }
13391
Linus Torvalds1da177e2005-04-16 15:20:36 -070013392 dev = alloc_etherdev(sizeof(*tp));
13393 if (!dev) {
13394 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13395 err = -ENOMEM;
13396 goto err_out_free_res;
13397 }
13398
Linus Torvalds1da177e2005-04-16 15:20:36 -070013399 SET_NETDEV_DEV(dev, &pdev->dev);
13400
Linus Torvalds1da177e2005-04-16 15:20:36 -070013401#if TG3_VLAN_TAG_USED
13402 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13403 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013404#endif
13405
13406 tp = netdev_priv(dev);
13407 tp->pdev = pdev;
13408 tp->dev = dev;
13409 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013410 tp->rx_mode = TG3_DEF_RX_MODE;
13411 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013412
Linus Torvalds1da177e2005-04-16 15:20:36 -070013413 if (tg3_debug > 0)
13414 tp->msg_enable = tg3_debug;
13415 else
13416 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13417
13418 /* The word/byte swap controls here control register access byte
13419 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13420 * setting below.
13421 */
13422 tp->misc_host_ctrl =
13423 MISC_HOST_CTRL_MASK_PCI_INT |
13424 MISC_HOST_CTRL_WORD_SWAP |
13425 MISC_HOST_CTRL_INDIR_ACCESS |
13426 MISC_HOST_CTRL_PCISTATE_RW;
13427
13428 /* The NONFRM (non-frame) byte/word swap controls take effect
13429 * on descriptor entries, anything which isn't packet data.
13430 *
13431 * The StrongARM chips on the board (one for tx, one for rx)
13432 * are running in big-endian mode.
13433 */
13434 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13435 GRC_MODE_WSWAP_NONFRM_DATA);
13436#ifdef __BIG_ENDIAN
13437 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13438#endif
13439 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013440 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013441 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013442
Matt Carlson63532392008-11-03 16:49:57 -080013443 dev->mem_start = pci_resource_start(pdev, BAR_0);
13444 tg3reg_len = pci_resource_len(pdev, BAR_0);
13445 dev->mem_end = dev->mem_start + tg3reg_len;
13446
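	/* Map BAR 0, the MMIO register window used by the tr32()/tw32()
	 * register accessors throughout the driver.
	 */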
13447 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013448 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013449 printk(KERN_ERR PFX "Cannot map device registers, "
13450 "aborting.\n");
13451 err = -ENOMEM;
13452 goto err_out_free_dev;
13453 }
13454
13455 tg3_init_link_config(tp);
13456
Linus Torvalds1da177e2005-04-16 15:20:36 -070013457 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13458 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13459 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13460
13461 dev->open = tg3_open;
13462 dev->stop = tg3_close;
13463 dev->get_stats = tg3_get_stats;
13464 dev->set_multicast_list = tg3_set_rx_mode;
13465 dev->set_mac_address = tg3_set_mac_addr;
13466 dev->do_ioctl = tg3_ioctl;
13467 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013468 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013469 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013470 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13471 dev->change_mtu = tg3_change_mtu;
13472 dev->irq = pdev->irq;
13473#ifdef CONFIG_NET_POLL_CONTROLLER
13474 dev->poll_controller = tg3_poll_controller;
13475#endif
13476
13477 err = tg3_get_invariants(tp);
13478 if (err) {
13479 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13480 "aborting.\n");
13481 goto err_out_iounmap;
13482 }
13483
Michael Chan4a29cc22006-03-19 13:21:12 -080013484 /* The EPB bridge inside 5714, 5715, and 5780 and any
13485 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013486 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13487 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13488 * do DMA address check in tg3_start_xmit().
13489 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013490 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13491 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13492 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013493 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13494#ifdef CONFIG_HIGHMEM
13495 dma_mask = DMA_64BIT_MASK;
13496#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013497 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013498 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13499
13500 /* Configure DMA attributes. */
13501 if (dma_mask > DMA_32BIT_MASK) {
13502 err = pci_set_dma_mask(pdev, dma_mask);
13503 if (!err) {
13504 dev->features |= NETIF_F_HIGHDMA;
13505 err = pci_set_consistent_dma_mask(pdev,
13506 persist_dma_mask);
13507 if (err < 0) {
13508 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13509 "DMA for consistent allocations\n");
13510 goto err_out_iounmap;
13511 }
13512 }
13513 }
13514 if (err || dma_mask == DMA_32BIT_MASK) {
13515 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13516 if (err) {
13517 printk(KERN_ERR PFX "No usable DMA configuration, "
13518 "aborting.\n");
13519 goto err_out_iounmap;
13520 }
13521 }
13522
Michael Chanfdfec172005-07-25 12:31:48 -070013523 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013524
Linus Torvalds1da177e2005-04-16 15:20:36 -070013525 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13526 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13527 }
13528 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13529 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13530 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013532 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13533 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13534 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013535 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013536 }
13537
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013538 /* TSO is on by default on chips that support hardware TSO.
13539 * Firmware TSO on older chips gives lower performance, so it
13540 * is off by default, but can be enabled using ethtool.
13541 */
Michael Chanb0026622006-07-03 19:42:14 -070013542 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013543 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013544 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13545 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013546 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13548 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13549 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013551 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013553
Linus Torvalds1da177e2005-04-16 15:20:36 -070013554
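	/* 5705 A1 parts without TSO that sit on a slower bus are limited
	 * (per the MAX_RXPEND_64 flag) to 64 posted standard RX buffers,
	 * hence the rx_pending value of 63.
	 */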
13555 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13556 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13557 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13558 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13559 tp->rx_pending = 63;
13560 }
13561
Linus Torvalds1da177e2005-04-16 15:20:36 -070013562 err = tg3_get_device_address(tp);
13563 if (err) {
13564 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13565 "aborting.\n");
13566 goto err_out_iounmap;
13567 }
13568
Matt Carlson0d3031d2007-10-10 18:02:43 -070013569 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013570 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013571 printk(KERN_ERR PFX "Cannot find proper PCI device "
13572 "base address for APE, aborting.\n");
13573 err = -ENODEV;
13574 goto err_out_iounmap;
13575 }
13576
Matt Carlson63532392008-11-03 16:49:57 -080013577 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013578 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013579 printk(KERN_ERR PFX "Cannot map APE registers, "
13580 "aborting.\n");
13581 err = -ENOMEM;
13582 goto err_out_iounmap;
13583 }
13584
13585 tg3_ape_lock_init(tp);
13586 }
13587
Matt Carlsonc88864d2007-11-12 21:07:01 -080013588 /*
13589	 * Reset the chip in case the UNDI or EFI driver did not shut it
13590	 * down cleanly; otherwise the DMA self test will enable WDMAC and
13591	 * we'll see (spurious) pending DMA on the PCI bus at that point.
13592 */
13593 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13594 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13595 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13596 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13597 }
13598
13599 err = tg3_test_dma(tp);
13600 if (err) {
13601 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13602 goto err_out_apeunmap;
13603 }
13604
13605	/* Checksum offload is IPv4-only on most chips (the newer chips
13606	 * below also offload IPv6), and some chips have buggy checksumming.
13607 */
13608 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13609 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013615 dev->features |= NETIF_F_IPV6_CSUM;
13616
13617 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13618 } else
13619 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13620
13621 /* flow control autonegotiation is default behavior */
13622 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013623 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
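	/* i.e. advertise both RX and TX pause and let autonegotiation
	 * resolve the flow control settings actually used on the link.
	 */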
Matt Carlsonc88864d2007-11-12 21:07:01 -080013624
13625 tg3_init_coal(tp);
13626
Michael Chanc49a1562006-12-17 17:07:29 -080013627 pci_set_drvdata(pdev, dev);
13628
Linus Torvalds1da177e2005-04-16 15:20:36 -070013629 err = register_netdev(dev);
13630 if (err) {
13631 printk(KERN_ERR PFX "Cannot register net device, "
13632 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013633 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013634 }
13635
Matt Carlsondf59c942008-11-03 16:52:56 -080013636 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013637 dev->name,
13638 tp->board_part_number,
13639 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013640 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013641 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013642
Matt Carlsondf59c942008-11-03 16:52:56 -080013643 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13644 printk(KERN_INFO
13645 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13646 tp->dev->name,
13647 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
Kay Sieversfb28ad32008-11-10 13:55:14 -080013648 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
Matt Carlsondf59c942008-11-03 16:52:56 -080013649 else
13650 printk(KERN_INFO
13651 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13652 tp->dev->name, tg3_phy_string(tp),
13653 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13654 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13655 "10/100/1000Base-T")),
13656 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13657
13658 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013659 dev->name,
13660 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13661 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13662 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13663 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013664 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013665 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13666 dev->name, tp->dma_rwctrl,
13667 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13668 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013669
13670 return 0;
13671
Matt Carlson0d3031d2007-10-10 18:02:43 -070013672err_out_apeunmap:
13673 if (tp->aperegs) {
13674 iounmap(tp->aperegs);
13675 tp->aperegs = NULL;
13676 }
13677
Linus Torvalds1da177e2005-04-16 15:20:36 -070013678err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013679 if (tp->regs) {
13680 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013681 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013682 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013683
13684err_out_free_dev:
13685 free_netdev(dev);
13686
13687err_out_free_res:
13688 pci_release_regions(pdev);
13689
13690err_out_disable_pdev:
13691 pci_disable_device(pdev);
13692 pci_set_drvdata(pdev, NULL);
13693 return err;
13694}
13695
13696static void __devexit tg3_remove_one(struct pci_dev *pdev)
13697{
13698 struct net_device *dev = pci_get_drvdata(pdev);
13699
13700 if (dev) {
13701 struct tg3 *tp = netdev_priv(dev);
13702
Michael Chan7faa0062006-02-02 17:29:28 -080013703 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013704
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013705 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13706 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013707 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013708 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013709
Linus Torvalds1da177e2005-04-16 15:20:36 -070013710 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013711 if (tp->aperegs) {
13712 iounmap(tp->aperegs);
13713 tp->aperegs = NULL;
13714 }
Michael Chan68929142005-08-09 20:17:14 -070013715 if (tp->regs) {
13716 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013717 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013718 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013719 free_netdev(dev);
13720 pci_release_regions(pdev);
13721 pci_disable_device(pdev);
13722 pci_set_drvdata(pdev, NULL);
13723 }
13724}
13725
13726static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13727{
13728 struct net_device *dev = pci_get_drvdata(pdev);
13729 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013730 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013731 int err;
13732
Michael Chan3e0c95f2007-08-03 20:56:54 -070013733 /* PCI register 4 needs to be saved whether netif_running() or not.
13734 * MSI address and data need to be saved if using MSI and
13735 * netif_running().
13736 */
13737 pci_save_state(pdev);
13738
Linus Torvalds1da177e2005-04-16 15:20:36 -070013739 if (!netif_running(dev))
13740 return 0;
13741
Michael Chan7faa0062006-02-02 17:29:28 -080013742 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013743 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013744 tg3_netif_stop(tp);
13745
13746 del_timer_sync(&tp->timer);
13747
David S. Millerf47c11e2005-06-24 20:18:35 -070013748 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013749 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013750 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013751
13752 netif_device_detach(dev);
13753
David S. Millerf47c11e2005-06-24 20:18:35 -070013754 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013755 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013756 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013757 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013758
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013759 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13760
13761 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013762 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013763 int err2;
13764
David S. Millerf47c11e2005-06-24 20:18:35 -070013765 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013766
Michael Chan6a9eba12005-12-13 21:08:58 -080013767 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013768 err2 = tg3_restart_hw(tp, 1);
13769 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013770 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013771
13772 tp->timer.expires = jiffies + tp->timer_offset;
13773 add_timer(&tp->timer);
13774
13775 netif_device_attach(dev);
13776 tg3_netif_start(tp);
13777
Michael Chanb9ec6c12006-07-25 16:37:27 -070013778out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013779 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013780
13781 if (!err2)
13782 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013783 }
13784
13785 return err;
13786}
13787
13788static int tg3_resume(struct pci_dev *pdev)
13789{
13790 struct net_device *dev = pci_get_drvdata(pdev);
13791 struct tg3 *tp = netdev_priv(dev);
13792 int err;
13793
Michael Chan3e0c95f2007-08-03 20:56:54 -070013794 pci_restore_state(tp->pdev);
13795
Linus Torvalds1da177e2005-04-16 15:20:36 -070013796 if (!netif_running(dev))
13797 return 0;
13798
Michael Chanbc1c7562006-03-20 17:48:03 -080013799 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013800 if (err)
13801 return err;
13802
13803 netif_device_attach(dev);
13804
David S. Millerf47c11e2005-06-24 20:18:35 -070013805 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013806
Michael Chan6a9eba12005-12-13 21:08:58 -080013807 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013808 err = tg3_restart_hw(tp, 1);
13809 if (err)
13810 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013811
13812 tp->timer.expires = jiffies + tp->timer_offset;
13813 add_timer(&tp->timer);
13814
Linus Torvalds1da177e2005-04-16 15:20:36 -070013815 tg3_netif_start(tp);
13816
Michael Chanb9ec6c12006-07-25 16:37:27 -070013817out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013818 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013819
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013820 if (!err)
13821 tg3_phy_start(tp);
13822
Michael Chanb9ec6c12006-07-25 16:37:27 -070013823 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013824}
13825
13826static struct pci_driver tg3_driver = {
13827 .name = DRV_MODULE_NAME,
13828 .id_table = tg3_pci_tbl,
13829 .probe = tg3_init_one,
13830 .remove = __devexit_p(tg3_remove_one),
13831 .suspend = tg3_suspend,
13832 .resume = tg3_resume
13833};
13834
13835static int __init tg3_init(void)
13836{
Jeff Garzik29917622006-08-19 17:48:59 -040013837 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013838}
13839
13840static void __exit tg3_cleanup(void)
13841{
13842 pci_unregister_driver(&tg3_driver);
13843}
13844
13845module_init(tg3_init);
13846module_exit(tg3_cleanup);