/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.94"
#define DRV_MODULE_RELDATE	"August 14, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

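/* Write a value into NIC on-chip memory.  Depending on the chip's
 * access method this goes either through the PCI configuration-space
 * memory window or through the memory-mapped window registers.
 */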
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

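/* Acquire an APE hardware lock on behalf of the driver: post a lock
 * request, then poll the grant register for up to 1 millisecond.  If
 * the grant never arrives, the request is revoked and -EBUSY returned.
 */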
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

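/* Sequence the core clock selection bits in TG3PCI_CLOCK_CTRL with the
 * required settle delays.  Chips with a CPMU and 5780-class chips do
 * not need this and are left untouched.
 */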
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

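/* Read a PHY register through the MAC's MII management interface.
 * Auto-polling is temporarily disabled, the read command is issued via
 * MAC_MI_COM, and the busy bit is polled for up to PHY_BUSY_LOOPS
 * iterations before giving up with -EBUSY.
 */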
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

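/* Program the 5785 MAC for the RGMII signalling options used by the
 * attached PHY: extended receive decode, in-band status, and the
 * receive/transmit mode bits in MAC_EXT_RGMII_MODE.
 */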
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

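/* Allocate and register a phylib MDIO bus for chips that use the PHY
 * library.  The PHY is reset first so its ID registers can be read,
 * and the discovered PHY is then configured for the interface mode
 * (RGMII or MII) the board requires.
 */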
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

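/* Push the current MII link state (BMCR/BMSR, advertisement, link
 * partner ability and 1000BASE-T status) to the management firmware
 * through the NIC SRAM firmware mailbox.
 */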
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

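/* Translate the driver's TX/RX flow control flags into the MII pause
 * advertisement bits for copper (1000T) and fiber/serdes (1000X)
 * autonegotiation, respectively.
 */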
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

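/* Resolve the negotiated pause configuration from the local and link
 * partner advertisements, returning the TX/RX flow control flags that
 * should actually be enabled.
 */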
static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

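/* Apply the resolved (or manually configured) flow control settings to
 * the MAC by updating the flow control enable bits in RX_MODE and
 * TX_MODE.
 */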
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

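/* phylib adjust_link callback.  Mirrors the PHY's negotiated speed,
 * duplex and pause settings into the MAC mode and TX length registers,
 * and logs a link message when anything has changed.
 */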
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		phydev->supported &= (PHY_GBIT_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

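/* Copy the per-chip analog tuning values stored in OTP (tp->phy_otp)
 * into the PHY DSP registers: AGC target, high/low pass filter, VDAC,
 * 10BASE-T amplitude and offset fields.
 */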
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001484static void tg3_phy_apply_otp(struct tg3 *tp)
1485{
1486 u32 otp, phy;
1487
1488 if (!tp->phy_otp)
1489 return;
1490
1491 otp = tp->phy_otp;
1492
1493 /* Enable SM_DSP clock and tx 6dB coding. */
1494 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1495 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1496 MII_TG3_AUXCTL_ACTL_TX_6DB;
1497 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1498
1499 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1500 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1501 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1502
1503 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1504 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1505 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1506
1507 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1508 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1509 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1510
1511 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1512 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1513
1514 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1515 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1516
1517 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1518 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1519 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1520
1521 /* Turn off SM_DSP clock. */
1522 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1523 MII_TG3_AUXCTL_ACTL_TX_6DB;
1524 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1525}
1526
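/* Poll PHY register 0x16 until the DSP macro busy bit (0x1000) clears.
 * Returns 0 on completion or -EBUSY if the macro never finishes.
 */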
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527static int tg3_wait_macro_done(struct tg3 *tp)
1528{
1529 int limit = 100;
1530
1531 while (limit--) {
1532 u32 tmp32;
1533
1534 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1535 if ((tmp32 & 0x1000) == 0)
1536 break;
1537 }
1538 }
 1539 if (limit < 0)
1540 return -EBUSY;
1541
1542 return 0;
1543}
1544
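/* Write a known test pattern into each of the four DSP channels and read
 * it back. A macro timeout requests another PHY reset via @resetp; any
 * timeout or read-back mismatch returns -EBUSY.
 */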
1545static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1546{
1547 static const u32 test_pat[4][6] = {
1548 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1549 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1550 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1551 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1552 };
1553 int chan;
1554
1555 for (chan = 0; chan < 4; chan++) {
1556 int i;
1557
1558 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1559 (chan * 0x2000) | 0x0200);
1560 tg3_writephy(tp, 0x16, 0x0002);
1561
1562 for (i = 0; i < 6; i++)
1563 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1564 test_pat[chan][i]);
1565
1566 tg3_writephy(tp, 0x16, 0x0202);
1567 if (tg3_wait_macro_done(tp)) {
1568 *resetp = 1;
1569 return -EBUSY;
1570 }
1571
1572 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1573 (chan * 0x2000) | 0x0200);
1574 tg3_writephy(tp, 0x16, 0x0082);
1575 if (tg3_wait_macro_done(tp)) {
1576 *resetp = 1;
1577 return -EBUSY;
1578 }
1579
1580 tg3_writephy(tp, 0x16, 0x0802);
1581 if (tg3_wait_macro_done(tp)) {
1582 *resetp = 1;
1583 return -EBUSY;
1584 }
1585
1586 for (i = 0; i < 6; i += 2) {
1587 u32 low, high;
1588
1589 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1590 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1591 tg3_wait_macro_done(tp)) {
1592 *resetp = 1;
1593 return -EBUSY;
1594 }
1595 low &= 0x7fff;
1596 high &= 0x000f;
1597 if (low != test_pat[chan][i] ||
1598 high != test_pat[chan][i+1]) {
1599 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1600 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1601 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1602
1603 return -EBUSY;
1604 }
1605 }
1606 }
1607
1608 return 0;
1609}
1610
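/* Clear the test pattern (write zeros) in all four DSP channels. */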
1611static int tg3_phy_reset_chanpat(struct tg3 *tp)
1612{
1613 int chan;
1614
1615 for (chan = 0; chan < 4; chan++) {
1616 int i;
1617
1618 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1619 (chan * 0x2000) | 0x0200);
1620 tg3_writephy(tp, 0x16, 0x0002);
1621 for (i = 0; i < 6; i++)
1622 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1623 tg3_writephy(tp, 0x16, 0x0202);
1624 if (tg3_wait_macro_done(tp))
1625 return -EBUSY;
1626 }
1627
1628 return 0;
1629}
1630
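/* PHY reset workaround for 5703/5704/5705: force a 1000 Mbps full-duplex
 * master link, rewrite the DSP channel test patterns until they verify
 * (bounded retries), then restore the transmitter, interrupt and master
 * settings.
 */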
1631static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1632{
1633 u32 reg32, phy9_orig;
1634 int retries, do_phy_reset, err;
1635
1636 retries = 10;
1637 do_phy_reset = 1;
1638 do {
1639 if (do_phy_reset) {
1640 err = tg3_bmcr_reset(tp);
1641 if (err)
1642 return err;
1643 do_phy_reset = 0;
1644 }
1645
1646 /* Disable transmitter and interrupt. */
1647 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1648 continue;
1649
1650 reg32 |= 0x3000;
1651 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1652
1653 /* Set full-duplex, 1000 mbps. */
1654 tg3_writephy(tp, MII_BMCR,
1655 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1656
1657 /* Set to master mode. */
1658 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1659 continue;
1660
1661 tg3_writephy(tp, MII_TG3_CTRL,
1662 (MII_TG3_CTRL_AS_MASTER |
1663 MII_TG3_CTRL_ENABLE_AS_MASTER));
1664
1665 /* Enable SM_DSP_CLOCK and 6dB. */
1666 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1667
1668 /* Block the PHY control access. */
1669 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1670 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1671
1672 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1673 if (!err)
1674 break;
1675 } while (--retries);
1676
1677 err = tg3_phy_reset_chanpat(tp);
1678 if (err)
1679 return err;
1680
1681 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1682 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1683
1684 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1685 tg3_writephy(tp, 0x16, 0x0000);
1686
1687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1689 /* Set Extended packet length bit for jumbo frames */
1690 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1691 }
1692 else {
1693 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1694 }
1695
1696 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1697
1698 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1699 reg32 &= ~0x3000;
1700 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1701 } else if (!err)
1702 err = -EBUSY;
1703
1704 return err;
1705}
1706
1707/* This will reset the tigon3 PHY if there is no valid
1708 * link unless the FORCE argument is non-zero.
1709 */
1710static int tg3_phy_reset(struct tg3 *tp)
1711{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001712 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 u32 phy_status;
1714 int err;
1715
Michael Chan60189dd2006-12-17 17:08:07 -08001716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1717 u32 val;
1718
1719 val = tr32(GRC_MISC_CFG);
1720 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1721 udelay(40);
1722 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1724 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1725 if (err != 0)
1726 return -EBUSY;
1727
Michael Chanc8e1e822006-04-29 18:55:17 -07001728 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1729 netif_carrier_off(tp->dev);
1730 tg3_link_report(tp);
1731 }
1732
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1734 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1735 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1736 err = tg3_phy_reset_5703_4_5(tp);
1737 if (err)
1738 return err;
1739 goto out;
1740 }
1741
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001742 cpmuctrl = 0;
1743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1744 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1745 cpmuctrl = tr32(TG3_CPMU_CTRL);
1746 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1747 tw32(TG3_CPMU_CTRL,
1748 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1749 }
1750
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 err = tg3_bmcr_reset(tp);
1752 if (err)
1753 return err;
1754
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001755 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1756 u32 phy;
1757
1758 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1759 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1760
1761 tw32(TG3_CPMU_CTRL, cpmuctrl);
1762 }
1763
Matt Carlsonbcb37f62008-11-03 16:52:09 -08001764 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1765 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001766 u32 val;
1767
1768 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1769 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1770 CPMU_LSPD_1000MB_MACCLK_12_5) {
1771 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1772 udelay(40);
1773 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1774 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001775
1776 /* Disable GPHY autopowerdown. */
1777 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1778 MII_TG3_MISC_SHDW_WREN |
1779 MII_TG3_MISC_SHDW_APD_SEL |
1780 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001781 }
1782
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001783 tg3_phy_apply_otp(tp);
1784
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785out:
1786 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1787 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1788 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1789 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1790 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1791 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1792 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1793 }
1794 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1795 tg3_writephy(tp, 0x1c, 0x8d68);
1796 tg3_writephy(tp, 0x1c, 0x8d68);
1797 }
1798 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1799 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1800 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1801 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1802 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1803 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1804 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1805 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1806 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1807 }
Michael Chanc424cb22006-04-29 18:56:34 -07001808 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1809 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1810 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001811 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1812 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1813 tg3_writephy(tp, MII_TG3_TEST1,
1814 MII_TG3_TEST1_TRIM_EN | 0x4);
1815 } else
1816 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001817 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1818 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 /* Set Extended packet length bit (bit 14) on all chips that */
1820 /* support jumbo frames */
1821 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1822 /* Cannot do read-modify-write on 5401 */
1823 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001824 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 u32 phy_reg;
1826
1827 /* Set bit 14 with read-modify-write to preserve other bits */
1828 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1829 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1830 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1831 }
1832
1833 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1834 * jumbo frames transmission.
1835 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001836 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 u32 phy_reg;
1838
1839 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1840 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1841 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1842 }
1843
Michael Chan715116a2006-09-27 16:09:25 -07001844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001845 /* adjust output voltage */
1846 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001847 }
1848
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001849 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 tg3_phy_set_wirespeed(tp);
1851 return 0;
1852}
1853
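/* Switch the GPIO-controlled auxiliary (Vaux) power on or off depending
 * on whether this NIC or its peer (on dual-port 5704/5714 boards) needs
 * it for Wake-on-LAN or ASF.
 */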
1854static void tg3_frob_aux_power(struct tg3 *tp)
1855{
1856 struct tg3 *tp_peer = tp;
1857
Michael Chan9d26e212006-12-07 00:21:14 -08001858 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 return;
1860
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001861 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1862 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1863 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001865 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001866 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001867 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001868 tp_peer = tp;
1869 else
1870 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001871 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872
1873 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001874 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1875 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1876 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001879 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1880 (GRC_LCLCTRL_GPIO_OE0 |
1881 GRC_LCLCTRL_GPIO_OE1 |
1882 GRC_LCLCTRL_GPIO_OE2 |
1883 GRC_LCLCTRL_GPIO_OUTPUT0 |
1884 GRC_LCLCTRL_GPIO_OUTPUT1),
1885 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001886 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1887 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1888 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1889 GRC_LCLCTRL_GPIO_OE1 |
1890 GRC_LCLCTRL_GPIO_OE2 |
1891 GRC_LCLCTRL_GPIO_OUTPUT0 |
1892 GRC_LCLCTRL_GPIO_OUTPUT1 |
1893 tp->grc_local_ctrl;
1894 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1895
1896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1897 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1898
1899 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1900 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 } else {
1902 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001903 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
1905 if (tp_peer != tp &&
1906 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1907 return;
1908
Michael Chandc56b7d2005-12-19 16:26:28 -08001909 /* Workaround to prevent overdrawing Amps. */
1910 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1911 ASIC_REV_5714) {
1912 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001913 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1914 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001915 }
1916
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 /* On 5753 and variants, GPIO2 cannot be used. */
1918 no_gpio2 = tp->nic_sram_data_cfg &
1919 NIC_SRAM_DATA_CFG_NO_GPIO2;
1920
Michael Chandc56b7d2005-12-19 16:26:28 -08001921 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 GRC_LCLCTRL_GPIO_OE1 |
1923 GRC_LCLCTRL_GPIO_OE2 |
1924 GRC_LCLCTRL_GPIO_OUTPUT1 |
1925 GRC_LCLCTRL_GPIO_OUTPUT2;
1926 if (no_gpio2) {
1927 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1928 GRC_LCLCTRL_GPIO_OUTPUT2);
1929 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001930 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1931 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
1933 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1934
Michael Chanb401e9e2005-12-19 16:27:04 -08001935 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1936 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 if (!no_gpio2) {
1939 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001940 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1941 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 }
1943 }
1944 } else {
1945 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1946 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1947 if (tp_peer != tp &&
1948 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1949 return;
1950
Michael Chanb401e9e2005-12-19 16:27:04 -08001951 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1952 (GRC_LCLCTRL_GPIO_OE1 |
1953 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954
Michael Chanb401e9e2005-12-19 16:27:04 -08001955 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1956 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
Michael Chanb401e9e2005-12-19 16:27:04 -08001958 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1959 (GRC_LCLCTRL_GPIO_OE1 |
1960 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 }
1962 }
1963}
1964
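/* Decide whether the 5700 needs MAC_MODE_LINK_POLARITY set for the given
 * link speed; the answer depends on the LED mode and the PHY type.
 */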
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07001965static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1966{
1967 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1968 return 1;
1969 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1970 if (speed != SPEED_10)
1971 return 1;
1972 } else if (speed == SPEED_10)
1973 return 1;
1974
1975 return 0;
1976}
1977
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978static int tg3_setup_phy(struct tg3 *, int);
1979
1980#define RESET_KIND_SHUTDOWN 0
1981#define RESET_KIND_INIT 1
1982#define RESET_KIND_SUSPEND 2
1983
1984static void tg3_write_sig_post_reset(struct tg3 *, int);
1985static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08001986static int tg3_nvram_lock(struct tg3 *);
1987static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
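/* Put the PHY into its lowest-power state for this chip: serdes parts at
 * most reprogram SG_DIG/SERDES_CFG, the 5906 EPHY is put into IDDQ, and
 * chips with known power-down bugs never get the final BMCR_PDOWN.
 */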
Matt Carlson0a459aa2008-11-03 16:54:15 -08001989static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
Michael Chan15c3b692006-03-22 01:06:52 -08001990{
Matt Carlsonce057f02007-11-12 21:08:03 -08001991 u32 val;
1992
Michael Chan51297242007-02-13 12:17:57 -08001993 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1995 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1996 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1997
1998 sg_dig_ctrl |=
1999 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2000 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2001 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2002 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002003 return;
Michael Chan51297242007-02-13 12:17:57 -08002004 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002005
Michael Chan60189dd2006-12-17 17:08:07 -08002006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08002007 tg3_bmcr_reset(tp);
2008 val = tr32(GRC_MISC_CFG);
2009 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2010 udelay(40);
2011 return;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002012 } else if (do_low_power) {
Michael Chan715116a2006-09-27 16:09:25 -07002013 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2014 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002015
2016 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2017 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2018 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2019 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2020 MII_TG3_AUXCTL_PCTL_VREG_11V);
Michael Chan715116a2006-09-27 16:09:25 -07002021 }
Michael Chan3f7045c2006-09-27 16:02:29 -07002022
Michael Chan15c3b692006-03-22 01:06:52 -08002023 /* The PHY should not be powered down on some chips because
2024 * of bugs.
2025 */
2026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2028 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2029 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2030 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002031
Matt Carlsonbcb37f62008-11-03 16:52:09 -08002032 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2033 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002034 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2035 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2036 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2037 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2038 }
2039
Michael Chan15c3b692006-03-22 01:06:52 -08002040 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2041}
2042
Matt Carlson3f007892008-11-03 16:51:36 -08002043/* tp->lock is held. */
2044static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2045{
2046 u32 addr_high, addr_low;
2047 int i;
2048
2049 addr_high = ((tp->dev->dev_addr[0] << 8) |
2050 tp->dev->dev_addr[1]);
2051 addr_low = ((tp->dev->dev_addr[2] << 24) |
2052 (tp->dev->dev_addr[3] << 16) |
2053 (tp->dev->dev_addr[4] << 8) |
2054 (tp->dev->dev_addr[5] << 0));
2055 for (i = 0; i < 4; i++) {
2056 if (i == 1 && skip_mac_1)
2057 continue;
2058 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2059 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2060 }
2061
2062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2064 for (i = 0; i < 12; i++) {
2065 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2066 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2067 }
2068 }
2069
2070 addr_high = (tp->dev->dev_addr[0] +
2071 tp->dev->dev_addr[1] +
2072 tp->dev->dev_addr[2] +
2073 tp->dev->dev_addr[3] +
2074 tp->dev->dev_addr[4] +
2075 tp->dev->dev_addr[5]) &
2076 TX_BACKOFF_SEED_MASK;
2077 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2078}
2079
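/* Move the device between D0 and the low-power PCI states. On the way
 * down this records the link configuration, limits the advertised speeds
 * for Wake-on-LAN, arms the MAC for magic-packet wakeup when the device
 * should wake the system, powers down the PHY when nothing needs it, and
 * switches auxiliary power before setting the new PCI power state.
 */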
Michael Chanbc1c7562006-03-20 17:48:03 -08002080static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081{
2082 u32 misc_host_ctrl;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002083 bool device_should_wake, do_low_power;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
2085 /* Make sure register accesses (indirect or otherwise)
2086 * will function correctly.
2087 */
2088 pci_write_config_dword(tp->pdev,
2089 TG3PCI_MISC_HOST_CTRL,
2090 tp->misc_host_ctrl);
2091
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002093 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002094 pci_enable_wake(tp->pdev, state, false);
2095 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002096
Michael Chan9d26e212006-12-07 00:21:14 -08002097 /* Switch out of Vaux if it is a NIC */
2098 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002099 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100
2101 return 0;
2102
Michael Chanbc1c7562006-03-20 17:48:03 -08002103 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002104 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002105 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 break;
2107
2108 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002109 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2110 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002112 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2114 tw32(TG3PCI_MISC_HOST_CTRL,
2115 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2116
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002117 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2118 device_may_wakeup(&tp->pdev->dev) &&
2119 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2120
Matt Carlsondd477002008-05-25 23:45:58 -07002121 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002122 do_low_power = false;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002123 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2124 !tp->link_config.phy_is_low_power) {
2125 struct phy_device *phydev;
Matt Carlson0a459aa2008-11-03 16:54:15 -08002126 u32 phyid, advertising;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002127
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002128 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002129
2130 tp->link_config.phy_is_low_power = 1;
2131
2132 tp->link_config.orig_speed = phydev->speed;
2133 tp->link_config.orig_duplex = phydev->duplex;
2134 tp->link_config.orig_autoneg = phydev->autoneg;
2135 tp->link_config.orig_advertising = phydev->advertising;
2136
2137 advertising = ADVERTISED_TP |
2138 ADVERTISED_Pause |
2139 ADVERTISED_Autoneg |
2140 ADVERTISED_10baseT_Half;
2141
2142 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002143 device_should_wake) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002144 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2145 advertising |=
2146 ADVERTISED_100baseT_Half |
2147 ADVERTISED_100baseT_Full |
2148 ADVERTISED_10baseT_Full;
2149 else
2150 advertising |= ADVERTISED_10baseT_Full;
2151 }
2152
2153 phydev->advertising = advertising;
2154
2155 phy_start_aneg(phydev);
Matt Carlson0a459aa2008-11-03 16:54:15 -08002156
2157 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2158 if (phyid != TG3_PHY_ID_BCMAC131) {
2159 phyid &= TG3_PHY_OUI_MASK;
 2160 if (phyid == TG3_PHY_OUI_1 ||
 2161 phyid == TG3_PHY_OUI_2 ||
2162 phyid == TG3_PHY_OUI_3)
2163 do_low_power = true;
2164 }
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002165 }
Matt Carlsondd477002008-05-25 23:45:58 -07002166 } else {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002167 do_low_power = false;
2168
Matt Carlsondd477002008-05-25 23:45:58 -07002169 if (tp->link_config.phy_is_low_power == 0) {
2170 tp->link_config.phy_is_low_power = 1;
2171 tp->link_config.orig_speed = tp->link_config.speed;
2172 tp->link_config.orig_duplex = tp->link_config.duplex;
2173 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2174 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175
Matt Carlsondd477002008-05-25 23:45:58 -07002176 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2177 tp->link_config.speed = SPEED_10;
2178 tp->link_config.duplex = DUPLEX_HALF;
2179 tp->link_config.autoneg = AUTONEG_ENABLE;
2180 tg3_setup_phy(tp, 0);
2181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 }
2183
Matt Carlson3f007892008-11-03 16:51:36 -08002184 __tg3_set_mac_addr(tp, 0);
2185
Michael Chanb5d37722006-09-27 16:06:21 -07002186 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2187 u32 val;
2188
2189 val = tr32(GRC_VCPU_EXT_CTRL);
2190 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2191 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002192 int i;
2193 u32 val;
2194
2195 for (i = 0; i < 200; i++) {
2196 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2197 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2198 break;
2199 msleep(1);
2200 }
2201 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002202 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2203 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2204 WOL_DRV_STATE_SHUTDOWN |
2205 WOL_DRV_WOL |
2206 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002207
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002208 if (device_should_wake) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 u32 mac_mode;
2210
2211 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlson0a459aa2008-11-03 16:54:15 -08002212 if (do_low_power) {
Matt Carlsondd477002008-05-25 23:45:58 -07002213 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2214 udelay(40);
2215 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
Michael Chan3f7045c2006-09-27 16:02:29 -07002217 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2218 mac_mode = MAC_MODE_PORT_MODE_GMII;
2219 else
2220 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002222 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2223 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2224 ASIC_REV_5700) {
2225 u32 speed = (tp->tg3_flags &
2226 TG3_FLAG_WOL_SPEED_100MB) ?
2227 SPEED_100 : SPEED_10;
2228 if (tg3_5700_link_polarity(tp, speed))
2229 mac_mode |= MAC_MODE_LINK_POLARITY;
2230 else
2231 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2232 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 } else {
2234 mac_mode = MAC_MODE_PORT_MODE_TBI;
2235 }
2236
John W. Linvillecbf46852005-04-21 17:01:29 -07002237 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 tw32(MAC_LED_CTRL, tp->led_ctrl);
2239
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002240 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2241 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2242 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2243 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2244 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2245 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246
Matt Carlson3bda1252008-08-15 14:08:22 -07002247 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2248 mac_mode |= tp->mac_mode &
2249 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2250 if (mac_mode & MAC_MODE_APE_TX_EN)
2251 mac_mode |= MAC_MODE_TDE_ENABLE;
2252 }
2253
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 tw32_f(MAC_MODE, mac_mode);
2255 udelay(100);
2256
2257 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2258 udelay(10);
2259 }
2260
2261 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2262 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2263 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2264 u32 base_val;
2265
2266 base_val = tp->pci_clock_ctrl;
2267 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2268 CLOCK_CTRL_TXCLK_DISABLE);
2269
Michael Chanb401e9e2005-12-19 16:27:04 -08002270 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2271 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002272 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002273 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002274 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002275 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002276 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2278 u32 newbits1, newbits2;
2279
2280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2281 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2282 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2283 CLOCK_CTRL_TXCLK_DISABLE |
2284 CLOCK_CTRL_ALTCLK);
2285 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2286 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2287 newbits1 = CLOCK_CTRL_625_CORE;
2288 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2289 } else {
2290 newbits1 = CLOCK_CTRL_ALTCLK;
2291 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2292 }
2293
Michael Chanb401e9e2005-12-19 16:27:04 -08002294 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2295 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
Michael Chanb401e9e2005-12-19 16:27:04 -08002297 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2298 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
2300 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2301 u32 newbits3;
2302
2303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2305 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2306 CLOCK_CTRL_TXCLK_DISABLE |
2307 CLOCK_CTRL_44MHZ_CORE);
2308 } else {
2309 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2310 }
2311
Michael Chanb401e9e2005-12-19 16:27:04 -08002312 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2313 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 }
2315 }
2316
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002317 if (!(device_should_wake) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002318 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2319 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson0a459aa2008-11-03 16:54:15 -08002320 tg3_power_down_phy(tp, do_low_power);
Michael Chan6921d202005-12-13 21:15:53 -08002321
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 tg3_frob_aux_power(tp);
2323
2324 /* Workaround for unstable PLL clock */
2325 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2326 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2327 u32 val = tr32(0x7d00);
2328
2329 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2330 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002331 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002332 int err;
2333
2334 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002336 if (!err)
2337 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 }
2340
Michael Chanbbadf502006-04-06 21:46:34 -07002341 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2342
Matt Carlson05ac4cb2008-11-03 16:53:46 -08002343 if (device_should_wake)
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002344 pci_enable_wake(tp->pdev, state, true);
2345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002347 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 return 0;
2350}
2351
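/* Decode the speed/duplex field of MII_TG3_AUX_STAT into SPEED_* and
 * DUPLEX_* values; the 5906 EPHY only reports 10 or 100 Mbps.
 */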
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2353{
2354 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2355 case MII_TG3_AUX_STAT_10HALF:
2356 *speed = SPEED_10;
2357 *duplex = DUPLEX_HALF;
2358 break;
2359
2360 case MII_TG3_AUX_STAT_10FULL:
2361 *speed = SPEED_10;
2362 *duplex = DUPLEX_FULL;
2363 break;
2364
2365 case MII_TG3_AUX_STAT_100HALF:
2366 *speed = SPEED_100;
2367 *duplex = DUPLEX_HALF;
2368 break;
2369
2370 case MII_TG3_AUX_STAT_100FULL:
2371 *speed = SPEED_100;
2372 *duplex = DUPLEX_FULL;
2373 break;
2374
2375 case MII_TG3_AUX_STAT_1000HALF:
2376 *speed = SPEED_1000;
2377 *duplex = DUPLEX_HALF;
2378 break;
2379
2380 case MII_TG3_AUX_STAT_1000FULL:
2381 *speed = SPEED_1000;
2382 *duplex = DUPLEX_FULL;
2383 break;
2384
2385 default:
Michael Chan715116a2006-09-27 16:09:25 -07002386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2387 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2388 SPEED_10;
2389 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2390 DUPLEX_HALF;
2391 break;
2392 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 *speed = SPEED_INVALID;
2394 *duplex = DUPLEX_INVALID;
2395 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397}
2398
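/* Program the copper PHY advertisement registers from link_config and
 * (re)start autonegotiation. Low-power mode advertises only 10/100, and
 * a forced speed/duplex is written directly to BMCR once the old link
 * has dropped.
 */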
2399static void tg3_phy_copper_begin(struct tg3 *tp)
2400{
2401 u32 new_adv;
2402 int i;
2403
2404 if (tp->link_config.phy_is_low_power) {
2405 /* Entering low power mode. Disable gigabit and
2406 * 100baseT advertisements.
2407 */
2408 tg3_writephy(tp, MII_TG3_CTRL, 0);
2409
2410 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2411 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2412 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2413 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2414
2415 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2416 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2418 tp->link_config.advertising &=
2419 ~(ADVERTISED_1000baseT_Half |
2420 ADVERTISED_1000baseT_Full);
2421
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002422 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2424 new_adv |= ADVERTISE_10HALF;
2425 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2426 new_adv |= ADVERTISE_10FULL;
2427 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2428 new_adv |= ADVERTISE_100HALF;
2429 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2430 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002431
2432 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2433
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2435
2436 if (tp->link_config.advertising &
2437 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2438 new_adv = 0;
2439 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2440 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2441 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2442 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2443 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2444 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2445 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2446 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2447 MII_TG3_CTRL_ENABLE_AS_MASTER);
2448 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2449 } else {
2450 tg3_writephy(tp, MII_TG3_CTRL, 0);
2451 }
2452 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002453 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2454 new_adv |= ADVERTISE_CSMA;
2455
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 /* Asking for a specific link mode. */
2457 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2459
2460 if (tp->link_config.duplex == DUPLEX_FULL)
2461 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2462 else
2463 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2464 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2465 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2466 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2467 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 if (tp->link_config.speed == SPEED_100) {
2470 if (tp->link_config.duplex == DUPLEX_FULL)
2471 new_adv |= ADVERTISE_100FULL;
2472 else
2473 new_adv |= ADVERTISE_100HALF;
2474 } else {
2475 if (tp->link_config.duplex == DUPLEX_FULL)
2476 new_adv |= ADVERTISE_10FULL;
2477 else
2478 new_adv |= ADVERTISE_10HALF;
2479 }
2480 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002481
2482 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002484
2485 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 }
2487
2488 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2489 tp->link_config.speed != SPEED_INVALID) {
2490 u32 bmcr, orig_bmcr;
2491
2492 tp->link_config.active_speed = tp->link_config.speed;
2493 tp->link_config.active_duplex = tp->link_config.duplex;
2494
2495 bmcr = 0;
2496 switch (tp->link_config.speed) {
2497 default:
2498 case SPEED_10:
2499 break;
2500
2501 case SPEED_100:
2502 bmcr |= BMCR_SPEED100;
2503 break;
2504
2505 case SPEED_1000:
2506 bmcr |= TG3_BMCR_SPEED1000;
2507 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
2510 if (tp->link_config.duplex == DUPLEX_FULL)
2511 bmcr |= BMCR_FULLDPLX;
2512
2513 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2514 (bmcr != orig_bmcr)) {
2515 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2516 for (i = 0; i < 1500; i++) {
2517 u32 tmp;
2518
2519 udelay(10);
2520 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2521 tg3_readphy(tp, MII_BMSR, &tmp))
2522 continue;
2523 if (!(tmp & BMSR_LSTATUS)) {
2524 udelay(40);
2525 break;
2526 }
2527 }
2528 tg3_writephy(tp, MII_BMCR, bmcr);
2529 udelay(40);
2530 }
2531 } else {
2532 tg3_writephy(tp, MII_BMCR,
2533 BMCR_ANENABLE | BMCR_ANRESTART);
2534 }
2535}
2536
2537static int tg3_init_5401phy_dsp(struct tg3 *tp)
2538{
2539 int err;
2540
2541 /* Turn off tap power management. */
2542 /* Set Extended packet length bit */
2543 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2544
2545 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2546 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2547
2548 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2549 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2550
2551 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2552 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2553
2554 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2555 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2556
2557 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2558 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2559
2560 udelay(40);
2561
2562 return err;
2563}
2564
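/* Return 1 only if every mode in @mask is already present in the PHY's
 * advertisement registers (MII_ADVERTISE, plus MII_TG3_CTRL for the
 * gigabit modes on chips that support them).
 */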
Michael Chan3600d912006-12-07 00:21:48 -08002565static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566{
Michael Chan3600d912006-12-07 00:21:48 -08002567 u32 adv_reg, all_mask = 0;
2568
2569 if (mask & ADVERTISED_10baseT_Half)
2570 all_mask |= ADVERTISE_10HALF;
2571 if (mask & ADVERTISED_10baseT_Full)
2572 all_mask |= ADVERTISE_10FULL;
2573 if (mask & ADVERTISED_100baseT_Half)
2574 all_mask |= ADVERTISE_100HALF;
2575 if (mask & ADVERTISED_100baseT_Full)
2576 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577
2578 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2579 return 0;
2580
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 if ((adv_reg & all_mask) != all_mask)
2582 return 0;
2583 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2584 u32 tg3_ctrl;
2585
Michael Chan3600d912006-12-07 00:21:48 -08002586 all_mask = 0;
2587 if (mask & ADVERTISED_1000baseT_Half)
2588 all_mask |= ADVERTISE_1000HALF;
2589 if (mask & ADVERTISED_1000baseT_Full)
2590 all_mask |= ADVERTISE_1000FULL;
2591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2593 return 0;
2594
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 if ((tg3_ctrl & all_mask) != all_mask)
2596 return 0;
2597 }
2598 return 1;
2599}
2600
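/* Verify that the advertised pause bits match the requested flow
 * control. On a full-duplex link a mismatch fails the check; otherwise
 * the advertisement is quietly reprogrammed so the next renegotiation
 * picks up the correct setting.
 */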
Matt Carlsonef167e22007-12-20 20:10:01 -08002601static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2602{
2603 u32 curadv, reqadv;
2604
2605 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2606 return 1;
2607
2608 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2609 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2610
2611 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2612 if (curadv != reqadv)
2613 return 0;
2614
2615 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2616 tg3_readphy(tp, MII_LPA, rmtadv);
2617 } else {
2618 /* Reprogram the advertisement register, even if it
2619 * does not affect the current link. If the link
2620 * gets renegotiated in the future, we can save an
2621 * additional renegotiation cycle by advertising
2622 * it correctly in the first place.
2623 */
2624 if (curadv != reqadv) {
2625 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2626 ADVERTISE_PAUSE_ASYM);
2627 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2628 }
2629 }
2630
2631 return 1;
2632}
2633
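/* Bring the copper link up (or re-validate it): clear stale MAC status,
 * reset problematic PHYs if the link was lost, poll BMSR/AUX_STAT for
 * the negotiated speed and duplex, check the flow-control advertisement,
 * and program MAC_MODE to match, reporting any carrier change.
 */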
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2635{
2636 int current_link_up;
2637 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002638 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 u16 current_speed;
2640 u8 current_duplex;
2641 int i, err;
2642
2643 tw32(MAC_EVENT, 0);
2644
2645 tw32_f(MAC_STATUS,
2646 (MAC_STATUS_SYNC_CHANGED |
2647 MAC_STATUS_CFG_CHANGED |
2648 MAC_STATUS_MI_COMPLETION |
2649 MAC_STATUS_LNKSTATE_CHANGED));
2650 udelay(40);
2651
Matt Carlson8ef21422008-05-02 16:47:53 -07002652 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2653 tw32_f(MAC_MI_MODE,
2654 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2655 udelay(80);
2656 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657
2658 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2659
2660 /* Some third-party PHYs need to be reset on link going
2661 * down.
2662 */
2663 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2664 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2666 netif_carrier_ok(tp->dev)) {
2667 tg3_readphy(tp, MII_BMSR, &bmsr);
2668 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2669 !(bmsr & BMSR_LSTATUS))
2670 force_reset = 1;
2671 }
2672 if (force_reset)
2673 tg3_phy_reset(tp);
2674
2675 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2676 tg3_readphy(tp, MII_BMSR, &bmsr);
2677 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2678 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2679 bmsr = 0;
2680
2681 if (!(bmsr & BMSR_LSTATUS)) {
2682 err = tg3_init_5401phy_dsp(tp);
2683 if (err)
2684 return err;
2685
2686 tg3_readphy(tp, MII_BMSR, &bmsr);
2687 for (i = 0; i < 1000; i++) {
2688 udelay(10);
2689 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2690 (bmsr & BMSR_LSTATUS)) {
2691 udelay(40);
2692 break;
2693 }
2694 }
2695
2696 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2697 !(bmsr & BMSR_LSTATUS) &&
2698 tp->link_config.active_speed == SPEED_1000) {
2699 err = tg3_phy_reset(tp);
2700 if (!err)
2701 err = tg3_init_5401phy_dsp(tp);
2702 if (err)
2703 return err;
2704 }
2705 }
2706 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2707 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2708 /* 5701 {A0,B0} CRC bug workaround */
2709 tg3_writephy(tp, 0x15, 0x0a75);
2710 tg3_writephy(tp, 0x1c, 0x8c68);
2711 tg3_writephy(tp, 0x1c, 0x8d68);
2712 tg3_writephy(tp, 0x1c, 0x8c68);
2713 }
2714
2715 /* Clear pending interrupts... */
2716 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2717 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2718
2719 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2720 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002721 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2723
2724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2725 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2726 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2727 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2728 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2729 else
2730 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2731 }
2732
2733 current_link_up = 0;
2734 current_speed = SPEED_INVALID;
2735 current_duplex = DUPLEX_INVALID;
2736
2737 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2738 u32 val;
2739
2740 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2741 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2742 if (!(val & (1 << 10))) {
2743 val |= (1 << 10);
2744 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2745 goto relink;
2746 }
2747 }
2748
2749 bmsr = 0;
2750 for (i = 0; i < 100; i++) {
2751 tg3_readphy(tp, MII_BMSR, &bmsr);
2752 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2753 (bmsr & BMSR_LSTATUS))
2754 break;
2755 udelay(40);
2756 }
2757
2758 if (bmsr & BMSR_LSTATUS) {
2759 u32 aux_stat, bmcr;
2760
2761 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2762 for (i = 0; i < 2000; i++) {
2763 udelay(10);
2764 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2765 aux_stat)
2766 break;
2767 }
2768
2769 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2770 &current_speed,
2771 &current_duplex);
2772
2773 bmcr = 0;
2774 for (i = 0; i < 200; i++) {
2775 tg3_readphy(tp, MII_BMCR, &bmcr);
2776 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2777 continue;
2778 if (bmcr && bmcr != 0x7fff)
2779 break;
2780 udelay(10);
2781 }
2782
Matt Carlsonef167e22007-12-20 20:10:01 -08002783 lcl_adv = 0;
2784 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785
Matt Carlsonef167e22007-12-20 20:10:01 -08002786 tp->link_config.active_speed = current_speed;
2787 tp->link_config.active_duplex = current_duplex;
2788
2789 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2790 if ((bmcr & BMCR_ANENABLE) &&
2791 tg3_copper_is_advertising_all(tp,
2792 tp->link_config.advertising)) {
2793 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2794 &rmt_adv))
2795 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 }
2797 } else {
2798 if (!(bmcr & BMCR_ANENABLE) &&
2799 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002800 tp->link_config.duplex == current_duplex &&
2801 tp->link_config.flowctrl ==
2802 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 }
2805 }
2806
Matt Carlsonef167e22007-12-20 20:10:01 -08002807 if (current_link_up == 1 &&
2808 tp->link_config.active_duplex == DUPLEX_FULL)
2809 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 }
2811
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812relink:
Michael Chan6921d202005-12-13 21:15:53 -08002813 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 u32 tmp;
2815
2816 tg3_phy_copper_begin(tp);
2817
2818 tg3_readphy(tp, MII_BMSR, &tmp);
2819 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2820 (tmp & BMSR_LSTATUS))
2821 current_link_up = 1;
2822 }
2823
2824 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2825 if (current_link_up == 1) {
2826 if (tp->link_config.active_speed == SPEED_100 ||
2827 tp->link_config.active_speed == SPEED_10)
2828 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2829 else
2830 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2831 } else
2832 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2833
2834 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2835 if (tp->link_config.active_duplex == DUPLEX_HALF)
2836 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2837
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002839 if (current_link_up == 1 &&
2840 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002842 else
2843 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 }
2845
2846 /* ??? Without this setting Netgear GA302T PHY does not
2847 * ??? send/receive packets...
2848 */
2849 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2850 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2851 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2852 tw32_f(MAC_MI_MODE, tp->mi_mode);
2853 udelay(80);
2854 }
2855
2856 tw32_f(MAC_MODE, tp->mac_mode);
2857 udelay(40);
2858
2859 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2860 /* Polled via timer. */
2861 tw32_f(MAC_EVENT, 0);
2862 } else {
2863 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2864 }
2865 udelay(40);
2866
2867 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2868 current_link_up == 1 &&
2869 tp->link_config.active_speed == SPEED_1000 &&
2870 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2871 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2872 udelay(120);
2873 tw32_f(MAC_STATUS,
2874 (MAC_STATUS_SYNC_CHANGED |
2875 MAC_STATUS_CFG_CHANGED));
2876 udelay(40);
2877 tg3_write_mem(tp,
2878 NIC_SRAM_FIRMWARE_MBOX,
2879 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2880 }
2881
2882 if (current_link_up != netif_carrier_ok(tp->dev)) {
2883 if (current_link_up)
2884 netif_carrier_on(tp->dev);
2885 else
2886 netif_carrier_off(tp->dev);
2887 tg3_link_report(tp);
2888 }
2889
2890 return 0;
2891}
2892
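/* Bookkeeping for the software autonegotiation state machine used on
 * fiber (1000BASE-X) links: current state, MR_* status flags, timing,
 * and the raw tx/rx configuration words exchanged with the link partner.
 */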
2893struct tg3_fiber_aneginfo {
2894 int state;
2895#define ANEG_STATE_UNKNOWN 0
2896#define ANEG_STATE_AN_ENABLE 1
2897#define ANEG_STATE_RESTART_INIT 2
2898#define ANEG_STATE_RESTART 3
2899#define ANEG_STATE_DISABLE_LINK_OK 4
2900#define ANEG_STATE_ABILITY_DETECT_INIT 5
2901#define ANEG_STATE_ABILITY_DETECT 6
2902#define ANEG_STATE_ACK_DETECT_INIT 7
2903#define ANEG_STATE_ACK_DETECT 8
2904#define ANEG_STATE_COMPLETE_ACK_INIT 9
2905#define ANEG_STATE_COMPLETE_ACK 10
2906#define ANEG_STATE_IDLE_DETECT_INIT 11
2907#define ANEG_STATE_IDLE_DETECT 12
2908#define ANEG_STATE_LINK_OK 13
2909#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2910#define ANEG_STATE_NEXT_PAGE_WAIT 15
2911
2912 u32 flags;
2913#define MR_AN_ENABLE 0x00000001
2914#define MR_RESTART_AN 0x00000002
2915#define MR_AN_COMPLETE 0x00000004
2916#define MR_PAGE_RX 0x00000008
2917#define MR_NP_LOADED 0x00000010
2918#define MR_TOGGLE_TX 0x00000020
2919#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2920#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2921#define MR_LP_ADV_SYM_PAUSE 0x00000100
2922#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2923#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2924#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2925#define MR_LP_ADV_NEXT_PAGE 0x00001000
2926#define MR_TOGGLE_RX 0x00002000
2927#define MR_NP_RX 0x00004000
2928
2929#define MR_LINK_OK 0x80000000
2930
2931 unsigned long link_time, cur_time;
2932
2933 u32 ability_match_cfg;
2934 int ability_match_count;
2935
2936 char ability_match, idle_match, ack_match;
2937
2938 u32 txconfig, rxconfig;
2939#define ANEG_CFG_NP 0x00000080
2940#define ANEG_CFG_ACK 0x00000040
2941#define ANEG_CFG_RF2 0x00000020
2942#define ANEG_CFG_RF1 0x00000010
2943#define ANEG_CFG_PS2 0x00000001
2944#define ANEG_CFG_PS1 0x00008000
2945#define ANEG_CFG_HD 0x00004000
2946#define ANEG_CFG_FD 0x00002000
2947#define ANEG_CFG_INVAL 0x00001f06
2948
2949};
2950#define ANEG_OK 0
2951#define ANEG_DONE 1
2952#define ANEG_TIMER_ENAB 2
2953#define ANEG_FAILED -1
2954
2955#define ANEG_STATE_SETTLE_TIME 10000
2956
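/* Run one step of the clause-37 style autonegotiation state machine,
 * advancing @ap based on the received configuration word. Returns
 * ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */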
2957static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2958 struct tg3_fiber_aneginfo *ap)
2959{
Matt Carlson5be73b42007-12-20 20:09:29 -08002960 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 unsigned long delta;
2962 u32 rx_cfg_reg;
2963 int ret;
2964
2965 if (ap->state == ANEG_STATE_UNKNOWN) {
2966 ap->rxconfig = 0;
2967 ap->link_time = 0;
2968 ap->cur_time = 0;
2969 ap->ability_match_cfg = 0;
2970 ap->ability_match_count = 0;
2971 ap->ability_match = 0;
2972 ap->idle_match = 0;
2973 ap->ack_match = 0;
2974 }
2975 ap->cur_time++;
2976
2977 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2978 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2979
2980 if (rx_cfg_reg != ap->ability_match_cfg) {
2981 ap->ability_match_cfg = rx_cfg_reg;
2982 ap->ability_match = 0;
2983 ap->ability_match_count = 0;
2984 } else {
2985 if (++ap->ability_match_count > 1) {
2986 ap->ability_match = 1;
2987 ap->ability_match_cfg = rx_cfg_reg;
2988 }
2989 }
2990 if (rx_cfg_reg & ANEG_CFG_ACK)
2991 ap->ack_match = 1;
2992 else
2993 ap->ack_match = 0;
2994
2995 ap->idle_match = 0;
2996 } else {
2997 ap->idle_match = 1;
2998 ap->ability_match_cfg = 0;
2999 ap->ability_match_count = 0;
3000 ap->ability_match = 0;
3001 ap->ack_match = 0;
3002
3003 rx_cfg_reg = 0;
3004 }
3005
3006 ap->rxconfig = rx_cfg_reg;
3007 ret = ANEG_OK;
3008
 3009 switch (ap->state) {
3010 case ANEG_STATE_UNKNOWN:
3011 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3012 ap->state = ANEG_STATE_AN_ENABLE;
3013
3014 /* fallthru */
3015 case ANEG_STATE_AN_ENABLE:
3016 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3017 if (ap->flags & MR_AN_ENABLE) {
3018 ap->link_time = 0;
3019 ap->cur_time = 0;
3020 ap->ability_match_cfg = 0;
3021 ap->ability_match_count = 0;
3022 ap->ability_match = 0;
3023 ap->idle_match = 0;
3024 ap->ack_match = 0;
3025
3026 ap->state = ANEG_STATE_RESTART_INIT;
3027 } else {
3028 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3029 }
3030 break;
3031
3032 case ANEG_STATE_RESTART_INIT:
3033 ap->link_time = ap->cur_time;
3034 ap->flags &= ~(MR_NP_LOADED);
3035 ap->txconfig = 0;
3036 tw32(MAC_TX_AUTO_NEG, 0);
3037 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3038 tw32_f(MAC_MODE, tp->mac_mode);
3039 udelay(40);
3040
3041 ret = ANEG_TIMER_ENAB;
3042 ap->state = ANEG_STATE_RESTART;
3043
3044 /* fallthru */
3045 case ANEG_STATE_RESTART:
3046 delta = ap->cur_time - ap->link_time;
3047 if (delta > ANEG_STATE_SETTLE_TIME) {
3048 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3049 } else {
3050 ret = ANEG_TIMER_ENAB;
3051 }
3052 break;
3053
3054 case ANEG_STATE_DISABLE_LINK_OK:
3055 ret = ANEG_DONE;
3056 break;
3057
3058 case ANEG_STATE_ABILITY_DETECT_INIT:
3059 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08003060 ap->txconfig = ANEG_CFG_FD;
3061 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3062 if (flowctrl & ADVERTISE_1000XPAUSE)
3063 ap->txconfig |= ANEG_CFG_PS1;
3064 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3065 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3067 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3068 tw32_f(MAC_MODE, tp->mac_mode);
3069 udelay(40);
3070
3071 ap->state = ANEG_STATE_ABILITY_DETECT;
3072 break;
3073
3074 case ANEG_STATE_ABILITY_DETECT:
3075 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3076 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3077 }
3078 break;
3079
3080 case ANEG_STATE_ACK_DETECT_INIT:
3081 ap->txconfig |= ANEG_CFG_ACK;
3082 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3083 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3084 tw32_f(MAC_MODE, tp->mac_mode);
3085 udelay(40);
3086
3087 ap->state = ANEG_STATE_ACK_DETECT;
3088
3089 /* fallthru */
3090 case ANEG_STATE_ACK_DETECT:
3091 if (ap->ack_match != 0) {
3092 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3093 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3094 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3095 } else {
3096 ap->state = ANEG_STATE_AN_ENABLE;
3097 }
3098 } else if (ap->ability_match != 0 &&
3099 ap->rxconfig == 0) {
3100 ap->state = ANEG_STATE_AN_ENABLE;
3101 }
3102 break;
3103
3104 case ANEG_STATE_COMPLETE_ACK_INIT:
3105 if (ap->rxconfig & ANEG_CFG_INVAL) {
3106 ret = ANEG_FAILED;
3107 break;
3108 }
3109 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3110 MR_LP_ADV_HALF_DUPLEX |
3111 MR_LP_ADV_SYM_PAUSE |
3112 MR_LP_ADV_ASYM_PAUSE |
3113 MR_LP_ADV_REMOTE_FAULT1 |
3114 MR_LP_ADV_REMOTE_FAULT2 |
3115 MR_LP_ADV_NEXT_PAGE |
3116 MR_TOGGLE_RX |
3117 MR_NP_RX);
3118 if (ap->rxconfig & ANEG_CFG_FD)
3119 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3120 if (ap->rxconfig & ANEG_CFG_HD)
3121 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3122 if (ap->rxconfig & ANEG_CFG_PS1)
3123 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3124 if (ap->rxconfig & ANEG_CFG_PS2)
3125 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3126 if (ap->rxconfig & ANEG_CFG_RF1)
3127 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3128 if (ap->rxconfig & ANEG_CFG_RF2)
3129 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3130 if (ap->rxconfig & ANEG_CFG_NP)
3131 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3132
3133 ap->link_time = ap->cur_time;
3134
3135 ap->flags ^= (MR_TOGGLE_TX);
3136 if (ap->rxconfig & 0x0008)
3137 ap->flags |= MR_TOGGLE_RX;
3138 if (ap->rxconfig & ANEG_CFG_NP)
3139 ap->flags |= MR_NP_RX;
3140 ap->flags |= MR_PAGE_RX;
3141
3142 ap->state = ANEG_STATE_COMPLETE_ACK;
3143 ret = ANEG_TIMER_ENAB;
3144 break;
3145
3146 case ANEG_STATE_COMPLETE_ACK:
3147 if (ap->ability_match != 0 &&
3148 ap->rxconfig == 0) {
3149 ap->state = ANEG_STATE_AN_ENABLE;
3150 break;
3151 }
3152 delta = ap->cur_time - ap->link_time;
3153 if (delta > ANEG_STATE_SETTLE_TIME) {
3154 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3155 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3156 } else {
3157 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3158 !(ap->flags & MR_NP_RX)) {
3159 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3160 } else {
3161 ret = ANEG_FAILED;
3162 }
3163 }
3164 }
3165 break;
3166
3167 case ANEG_STATE_IDLE_DETECT_INIT:
3168 ap->link_time = ap->cur_time;
3169 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3170 tw32_f(MAC_MODE, tp->mac_mode);
3171 udelay(40);
3172
3173 ap->state = ANEG_STATE_IDLE_DETECT;
3174 ret = ANEG_TIMER_ENAB;
3175 break;
3176
3177 case ANEG_STATE_IDLE_DETECT:
3178 if (ap->ability_match != 0 &&
3179 ap->rxconfig == 0) {
3180 ap->state = ANEG_STATE_AN_ENABLE;
3181 break;
3182 }
3183 delta = ap->cur_time - ap->link_time;
3184 if (delta > ANEG_STATE_SETTLE_TIME) {
3185 /* XXX another gem from the Broadcom driver :( */
3186 ap->state = ANEG_STATE_LINK_OK;
3187 }
3188 break;
3189
3190 case ANEG_STATE_LINK_OK:
3191 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3192 ret = ANEG_DONE;
3193 break;
3194
3195 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3196 /* ??? unimplemented */
3197 break;
3198
3199 case ANEG_STATE_NEXT_PAGE_WAIT:
3200 /* ??? unimplemented */
3201 break;
3202
3203 default:
3204 ret = ANEG_FAILED;
3205 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003206 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
3208 return ret;
3209}
3210
Matt Carlson5be73b42007-12-20 20:09:29 -08003211static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212{
3213 int res = 0;
3214 struct tg3_fiber_aneginfo aninfo;
3215 int status = ANEG_FAILED;
3216 unsigned int tick;
3217 u32 tmp;
3218
3219 tw32_f(MAC_TX_AUTO_NEG, 0);
3220
3221 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3222 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3223 udelay(40);
3224
3225 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3226 udelay(40);
3227
3228 memset(&aninfo, 0, sizeof(aninfo));
3229 aninfo.flags |= MR_AN_ENABLE;
3230 aninfo.state = ANEG_STATE_UNKNOWN;
3231 aninfo.cur_time = 0;
3232 tick = 0;
3233 while (++tick < 195000) {
3234 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3235 if (status == ANEG_DONE || status == ANEG_FAILED)
3236 break;
3237
3238 udelay(1);
3239 }
3240
3241 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3242 tw32_f(MAC_MODE, tp->mac_mode);
3243 udelay(40);
3244
Matt Carlson5be73b42007-12-20 20:09:29 -08003245 *txflags = aninfo.txconfig;
3246 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247
3248 if (status == ANEG_DONE &&
3249 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3250 MR_LP_ADV_FULL_DUPLEX)))
3251 res = 1;
3252
3253 return res;
3254}
3255
3256static void tg3_init_bcm8002(struct tg3 *tp)
3257{
3258 u32 mac_status = tr32(MAC_STATUS);
3259 int i;
3260
 3261	/* Reset when initializing for the first time or when we have a link. */
3262 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3263 !(mac_status & MAC_STATUS_PCS_SYNCED))
3264 return;
3265
3266 /* Set PLL lock range. */
3267 tg3_writephy(tp, 0x16, 0x8007);
3268
3269 /* SW reset */
3270 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3271
3272 /* Wait for reset to complete. */
3273 /* XXX schedule_timeout() ... */
3274 for (i = 0; i < 500; i++)
3275 udelay(10);
3276
3277 /* Config mode; select PMA/Ch 1 regs. */
3278 tg3_writephy(tp, 0x10, 0x8411);
3279
3280 /* Enable auto-lock and comdet, select txclk for tx. */
3281 tg3_writephy(tp, 0x11, 0x0a10);
3282
3283 tg3_writephy(tp, 0x18, 0x00a0);
3284 tg3_writephy(tp, 0x16, 0x41ff);
3285
3286 /* Assert and deassert POR. */
3287 tg3_writephy(tp, 0x13, 0x0400);
3288 udelay(40);
3289 tg3_writephy(tp, 0x13, 0x0000);
3290
3291 tg3_writephy(tp, 0x11, 0x0a50);
3292 udelay(40);
3293 tg3_writephy(tp, 0x11, 0x0a10);
3294
3295 /* Wait for signal to stabilize */
3296 /* XXX schedule_timeout() ... */
3297 for (i = 0; i < 15000; i++)
3298 udelay(10);
3299
3300 /* Deselect the channel register so we can read the PHYID
3301 * later.
3302 */
3303 tg3_writephy(tp, 0x10, 0x8011);
3304}
3305
3306static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3307{
Matt Carlson82cd3d12007-12-20 20:09:00 -08003308 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309 u32 sg_dig_ctrl, sg_dig_status;
3310 u32 serdes_cfg, expected_sg_dig_ctrl;
3311 int workaround, port_a;
3312 int current_link_up;
3313
3314 serdes_cfg = 0;
3315 expected_sg_dig_ctrl = 0;
3316 workaround = 0;
3317 port_a = 1;
3318 current_link_up = 0;
3319
3320 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3321 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3322 workaround = 1;
3323 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3324 port_a = 0;
3325
3326 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3327 /* preserve bits 20-23 for voltage regulator */
3328 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3329 }
3330
3331 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3332
3333 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003334 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 if (workaround) {
3336 u32 val = serdes_cfg;
3337
3338 if (port_a)
3339 val |= 0xc010000;
3340 else
3341 val |= 0x4010000;
3342 tw32_f(MAC_SERDES_CFG, val);
3343 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003344
3345 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346 }
3347 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3348 tg3_setup_flow_control(tp, 0, 0);
3349 current_link_up = 1;
3350 }
3351 goto out;
3352 }
3353
3354 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003355 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
Matt Carlson82cd3d12007-12-20 20:09:00 -08003357 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3358 if (flowctrl & ADVERTISE_1000XPAUSE)
3359 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3360 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3361 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362
3363 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003364 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3365 tp->serdes_counter &&
3366 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3367 MAC_STATUS_RCVD_CFG)) ==
3368 MAC_STATUS_PCS_SYNCED)) {
3369 tp->serdes_counter--;
3370 current_link_up = 1;
3371 goto out;
3372 }
3373restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 if (workaround)
3375 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003376 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 udelay(5);
3378 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3379
Michael Chan3d3ebe72006-09-27 15:59:15 -07003380 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3381 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3383 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003384 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 mac_status = tr32(MAC_STATUS);
3386
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003387 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08003389 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
Matt Carlson82cd3d12007-12-20 20:09:00 -08003391 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3392 local_adv |= ADVERTISE_1000XPAUSE;
3393 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3394 local_adv |= ADVERTISE_1000XPSE_ASYM;
3395
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003396 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003397 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003398 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08003399 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400
3401 tg3_setup_flow_control(tp, local_adv, remote_adv);
3402 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003403 tp->serdes_counter = 0;
3404 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003405 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07003406 if (tp->serdes_counter)
3407 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 else {
3409 if (workaround) {
3410 u32 val = serdes_cfg;
3411
3412 if (port_a)
3413 val |= 0xc010000;
3414 else
3415 val |= 0x4010000;
3416
3417 tw32_f(MAC_SERDES_CFG, val);
3418 }
3419
Matt Carlsonc98f6e32007-12-20 20:08:32 -08003420 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 udelay(40);
3422
3423 /* Link parallel detection - link is up */
3424 /* only if we have PCS_SYNC and not */
3425 /* receiving config code words */
3426 mac_status = tr32(MAC_STATUS);
3427 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3428 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3429 tg3_setup_flow_control(tp, 0, 0);
3430 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003431 tp->tg3_flags2 |=
3432 TG3_FLG2_PARALLEL_DETECT;
3433 tp->serdes_counter =
3434 SERDES_PARALLEL_DET_TIMEOUT;
3435 } else
3436 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 }
3438 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07003439 } else {
3440 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3441 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 }
3443
3444out:
3445 return current_link_up;
3446}
3447
3448static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3449{
3450 int current_link_up = 0;
3451
Michael Chan5cf64b82007-05-05 12:11:21 -07003452 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454
3455 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08003456 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003458
Matt Carlson5be73b42007-12-20 20:09:29 -08003459 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3460 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461
Matt Carlson5be73b42007-12-20 20:09:29 -08003462 if (txflags & ANEG_CFG_PS1)
3463 local_adv |= ADVERTISE_1000XPAUSE;
3464 if (txflags & ANEG_CFG_PS2)
3465 local_adv |= ADVERTISE_1000XPSE_ASYM;
3466
3467 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3468 remote_adv |= LPA_1000XPAUSE;
3469 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3470 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471
3472 tg3_setup_flow_control(tp, local_adv, remote_adv);
3473
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474 current_link_up = 1;
3475 }
3476 for (i = 0; i < 30; i++) {
3477 udelay(20);
3478 tw32_f(MAC_STATUS,
3479 (MAC_STATUS_SYNC_CHANGED |
3480 MAC_STATUS_CFG_CHANGED));
3481 udelay(40);
3482 if ((tr32(MAC_STATUS) &
3483 (MAC_STATUS_SYNC_CHANGED |
3484 MAC_STATUS_CFG_CHANGED)) == 0)
3485 break;
3486 }
3487
3488 mac_status = tr32(MAC_STATUS);
3489 if (current_link_up == 0 &&
3490 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3491 !(mac_status & MAC_STATUS_RCVD_CFG))
3492 current_link_up = 1;
3493 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003494 tg3_setup_flow_control(tp, 0, 0);
3495
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 /* Forcing 1000FD link up. */
3497 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498
3499 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3500 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003501
3502 tw32_f(MAC_MODE, tp->mac_mode);
3503 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 }
3505
3506out:
3507 return current_link_up;
3508}
3509
3510static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3511{
3512 u32 orig_pause_cfg;
3513 u16 orig_active_speed;
3514 u8 orig_active_duplex;
3515 u32 mac_status;
3516 int current_link_up;
3517 int i;
3518
Matt Carlson8d018622007-12-20 20:05:44 -08003519 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520 orig_active_speed = tp->link_config.active_speed;
3521 orig_active_duplex = tp->link_config.active_duplex;
3522
3523 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3524 netif_carrier_ok(tp->dev) &&
3525 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3526 mac_status = tr32(MAC_STATUS);
3527 mac_status &= (MAC_STATUS_PCS_SYNCED |
3528 MAC_STATUS_SIGNAL_DET |
3529 MAC_STATUS_CFG_CHANGED |
3530 MAC_STATUS_RCVD_CFG);
3531 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3532 MAC_STATUS_SIGNAL_DET)) {
3533 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3534 MAC_STATUS_CFG_CHANGED));
3535 return 0;
3536 }
3537 }
3538
3539 tw32_f(MAC_TX_AUTO_NEG, 0);
3540
3541 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3542 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3543 tw32_f(MAC_MODE, tp->mac_mode);
3544 udelay(40);
3545
3546 if (tp->phy_id == PHY_ID_BCM8002)
3547 tg3_init_bcm8002(tp);
3548
3549 /* Enable link change event even when serdes polling. */
3550 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3551 udelay(40);
3552
3553 current_link_up = 0;
3554 mac_status = tr32(MAC_STATUS);
3555
3556 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3557 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3558 else
3559 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3560
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 tp->hw_status->status =
3562 (SD_STATUS_UPDATED |
3563 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3564
3565 for (i = 0; i < 100; i++) {
3566 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3567 MAC_STATUS_CFG_CHANGED));
3568 udelay(5);
3569 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003570 MAC_STATUS_CFG_CHANGED |
3571 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572 break;
3573 }
3574
3575 mac_status = tr32(MAC_STATUS);
3576 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3577 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003578 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3579 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580 tw32_f(MAC_MODE, (tp->mac_mode |
3581 MAC_MODE_SEND_CONFIGS));
3582 udelay(1);
3583 tw32_f(MAC_MODE, tp->mac_mode);
3584 }
3585 }
3586
3587 if (current_link_up == 1) {
3588 tp->link_config.active_speed = SPEED_1000;
3589 tp->link_config.active_duplex = DUPLEX_FULL;
3590 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3591 LED_CTRL_LNKLED_OVERRIDE |
3592 LED_CTRL_1000MBPS_ON));
3593 } else {
3594 tp->link_config.active_speed = SPEED_INVALID;
3595 tp->link_config.active_duplex = DUPLEX_INVALID;
3596 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3597 LED_CTRL_LNKLED_OVERRIDE |
3598 LED_CTRL_TRAFFIC_OVERRIDE));
3599 }
3600
3601 if (current_link_up != netif_carrier_ok(tp->dev)) {
3602 if (current_link_up)
3603 netif_carrier_on(tp->dev);
3604 else
3605 netif_carrier_off(tp->dev);
3606 tg3_link_report(tp);
3607 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003608 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 if (orig_pause_cfg != now_pause_cfg ||
3610 orig_active_speed != tp->link_config.active_speed ||
3611 orig_active_duplex != tp->link_config.active_duplex)
3612 tg3_link_report(tp);
3613 }
3614
3615 return 0;
3616}
3617
Michael Chan747e8f82005-07-25 12:33:22 -07003618static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3619{
3620 int current_link_up, err = 0;
3621 u32 bmsr, bmcr;
3622 u16 current_speed;
3623 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003624 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003625
3626 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3627 tw32_f(MAC_MODE, tp->mac_mode);
3628 udelay(40);
3629
3630 tw32(MAC_EVENT, 0);
3631
3632 tw32_f(MAC_STATUS,
3633 (MAC_STATUS_SYNC_CHANGED |
3634 MAC_STATUS_CFG_CHANGED |
3635 MAC_STATUS_MI_COMPLETION |
3636 MAC_STATUS_LNKSTATE_CHANGED));
3637 udelay(40);
3638
3639 if (force_reset)
3640 tg3_phy_reset(tp);
3641
3642 current_link_up = 0;
3643 current_speed = SPEED_INVALID;
3644 current_duplex = DUPLEX_INVALID;
3645
3646 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3647 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003648 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3649 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3650 bmsr |= BMSR_LSTATUS;
3651 else
3652 bmsr &= ~BMSR_LSTATUS;
3653 }
Michael Chan747e8f82005-07-25 12:33:22 -07003654
3655 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3656
3657 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlson2bd3ed02008-06-09 15:39:55 -07003658 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
Michael Chan747e8f82005-07-25 12:33:22 -07003659 /* do nothing, just check for link up at the end */
3660 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3661 u32 adv, new_adv;
3662
3663 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3664 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3665 ADVERTISE_1000XPAUSE |
3666 ADVERTISE_1000XPSE_ASYM |
3667 ADVERTISE_SLCT);
3668
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003669 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003670
3671 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3672 new_adv |= ADVERTISE_1000XHALF;
3673 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3674 new_adv |= ADVERTISE_1000XFULL;
3675
3676 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3677 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3678 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3679 tg3_writephy(tp, MII_BMCR, bmcr);
3680
3681 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003682 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003683 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3684
3685 return err;
3686 }
3687 } else {
3688 u32 new_bmcr;
3689
3690 bmcr &= ~BMCR_SPEED1000;
3691 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3692
3693 if (tp->link_config.duplex == DUPLEX_FULL)
3694 new_bmcr |= BMCR_FULLDPLX;
3695
3696 if (new_bmcr != bmcr) {
3697 /* BMCR_SPEED1000 is a reserved bit that needs
3698 * to be set on write.
3699 */
3700 new_bmcr |= BMCR_SPEED1000;
3701
3702 /* Force a linkdown */
3703 if (netif_carrier_ok(tp->dev)) {
3704 u32 adv;
3705
3706 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3707 adv &= ~(ADVERTISE_1000XFULL |
3708 ADVERTISE_1000XHALF |
3709 ADVERTISE_SLCT);
3710 tg3_writephy(tp, MII_ADVERTISE, adv);
3711 tg3_writephy(tp, MII_BMCR, bmcr |
3712 BMCR_ANRESTART |
3713 BMCR_ANENABLE);
3714 udelay(10);
3715 netif_carrier_off(tp->dev);
3716 }
3717 tg3_writephy(tp, MII_BMCR, new_bmcr);
3718 bmcr = new_bmcr;
3719 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3720 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003721 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3722 ASIC_REV_5714) {
3723 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3724 bmsr |= BMSR_LSTATUS;
3725 else
3726 bmsr &= ~BMSR_LSTATUS;
3727 }
Michael Chan747e8f82005-07-25 12:33:22 -07003728 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3729 }
3730 }
3731
3732 if (bmsr & BMSR_LSTATUS) {
3733 current_speed = SPEED_1000;
3734 current_link_up = 1;
3735 if (bmcr & BMCR_FULLDPLX)
3736 current_duplex = DUPLEX_FULL;
3737 else
3738 current_duplex = DUPLEX_HALF;
3739
Matt Carlsonef167e22007-12-20 20:10:01 -08003740 local_adv = 0;
3741 remote_adv = 0;
3742
Michael Chan747e8f82005-07-25 12:33:22 -07003743 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003744 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003745
3746 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3747 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3748 common = local_adv & remote_adv;
3749 if (common & (ADVERTISE_1000XHALF |
3750 ADVERTISE_1000XFULL)) {
3751 if (common & ADVERTISE_1000XFULL)
3752 current_duplex = DUPLEX_FULL;
3753 else
3754 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003755 }
3756 else
3757 current_link_up = 0;
3758 }
3759 }
3760
Matt Carlsonef167e22007-12-20 20:10:01 -08003761 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3762 tg3_setup_flow_control(tp, local_adv, remote_adv);
3763
Michael Chan747e8f82005-07-25 12:33:22 -07003764 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3765 if (tp->link_config.active_duplex == DUPLEX_HALF)
3766 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3767
3768 tw32_f(MAC_MODE, tp->mac_mode);
3769 udelay(40);
3770
3771 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3772
3773 tp->link_config.active_speed = current_speed;
3774 tp->link_config.active_duplex = current_duplex;
3775
3776 if (current_link_up != netif_carrier_ok(tp->dev)) {
3777 if (current_link_up)
3778 netif_carrier_on(tp->dev);
3779 else {
3780 netif_carrier_off(tp->dev);
3781 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3782 }
3783 tg3_link_report(tp);
3784 }
3785 return err;
3786}
3787
3788static void tg3_serdes_parallel_detect(struct tg3 *tp)
3789{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003790 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003791 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003792 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003793 return;
3794 }
3795 if (!netif_carrier_ok(tp->dev) &&
3796 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3797 u32 bmcr;
3798
3799 tg3_readphy(tp, MII_BMCR, &bmcr);
3800 if (bmcr & BMCR_ANENABLE) {
3801 u32 phy1, phy2;
3802
3803 /* Select shadow register 0x1f */
3804 tg3_writephy(tp, 0x1c, 0x7c00);
3805 tg3_readphy(tp, 0x1c, &phy1);
3806
3807 /* Select expansion interrupt status register */
3808 tg3_writephy(tp, 0x17, 0x0f01);
3809 tg3_readphy(tp, 0x15, &phy2);
3810 tg3_readphy(tp, 0x15, &phy2);
3811
3812 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3813 /* We have signal detect and not receiving
3814 * config code words, link is up by parallel
3815 * detection.
3816 */
3817
3818 bmcr &= ~BMCR_ANENABLE;
3819 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3820 tg3_writephy(tp, MII_BMCR, bmcr);
3821 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3822 }
3823 }
3824 }
3825 else if (netif_carrier_ok(tp->dev) &&
3826 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3827 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3828 u32 phy2;
3829
3830 /* Select expansion interrupt status register */
3831 tg3_writephy(tp, 0x17, 0x0f01);
3832 tg3_readphy(tp, 0x15, &phy2);
3833 if (phy2 & 0x20) {
3834 u32 bmcr;
3835
3836 /* Config code words received, turn on autoneg. */
3837 tg3_readphy(tp, MII_BMCR, &bmcr);
3838 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3839
3840 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3841
3842 }
3843 }
3844}
3845
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3847{
3848 int err;
3849
3850 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3851 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003852 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3853 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854 } else {
3855 err = tg3_setup_copper_phy(tp, force_reset);
3856 }
3857
Matt Carlsonbcb37f62008-11-03 16:52:09 -08003858 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003859 u32 val, scale;
3860
3861 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3862 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3863 scale = 65;
3864 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3865 scale = 6;
3866 else
3867 scale = 12;
3868
3869 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3870 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3871 tw32(GRC_MISC_CFG, val);
3872 }
3873
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 if (tp->link_config.active_speed == SPEED_1000 &&
3875 tp->link_config.active_duplex == DUPLEX_HALF)
3876 tw32(MAC_TX_LENGTHS,
3877 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3878 (6 << TX_LENGTHS_IPG_SHIFT) |
3879 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3880 else
3881 tw32(MAC_TX_LENGTHS,
3882 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3883 (6 << TX_LENGTHS_IPG_SHIFT) |
3884 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3885
3886 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3887 if (netif_carrier_ok(tp->dev)) {
3888 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003889 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890 } else {
3891 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3892 }
3893 }
3894
Matt Carlson8ed5d972007-05-07 00:25:49 -07003895 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3896 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3897 if (!netif_carrier_ok(tp->dev))
3898 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3899 tp->pwrmgmt_thresh;
3900 else
3901 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3902 tw32(PCIE_PWR_MGMT_THRESH, val);
3903 }
3904
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 return err;
3906}
3907
Michael Chandf3e6542006-05-26 17:48:07 -07003908/* This is called whenever we suspect that the system chipset is re-
3909 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3910 * is bogus tx completions. We try to recover by setting the
3911 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3912 * in the workqueue.
3913 */
3914static void tg3_tx_recover(struct tg3 *tp)
3915{
3916 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3917 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3918
3919 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3920 "mapped I/O cycles to the network device, attempting to "
3921 "recover. Please report the problem to the driver maintainer "
3922 "and include system chipset information.\n", tp->dev->name);
3923
3924 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003925 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003926 spin_unlock(&tp->lock);
3927}
3928
Michael Chan1b2a7202006-08-07 21:46:02 -07003929static inline u32 tg3_tx_avail(struct tg3 *tp)
3930{
3931 smp_mb();
3932 return (tp->tx_pending -
3933 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3934}
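
/* Illustrative sketch, not part of the driver: the free-descriptor count
 * above relies on unsigned wraparound, so it stays correct after tx_prod
 * wraps past tx_cons.  The helper below (hypothetical name, fixed ring
 * size of 512) reproduces the same arithmetic with concrete numbers.
 */
#if 0	/* example only */
static u32 example_tx_avail(u32 pending, u32 prod, u32 cons)
{
	/* e.g. pending = 512, prod = 5 (already wrapped), cons = 510:
	 * (5 - 510) & 511 = 7 descriptors in flight, so 505 are free.
	 */
	return pending - ((prod - cons) & (512 - 1));
}
#endif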
3935
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936/* Tigon3 never reports partial packet sends. So we do not
3937 * need special logic to handle SKBs that have not had all
3938 * of their frags sent yet, like SunGEM does.
3939 */
3940static void tg3_tx(struct tg3 *tp)
3941{
3942 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3943 u32 sw_idx = tp->tx_cons;
3944
3945 while (sw_idx != hw_idx) {
3946 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3947 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003948 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949
Michael Chandf3e6542006-05-26 17:48:07 -07003950 if (unlikely(skb == NULL)) {
3951 tg3_tx_recover(tp);
3952 return;
3953 }
3954
David S. Miller90079ce2008-09-11 04:52:51 -07003955 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
3957 ri->skb = NULL;
3958
3959 sw_idx = NEXT_TX(sw_idx);
3960
3961 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003963 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3964 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965 sw_idx = NEXT_TX(sw_idx);
3966 }
3967
David S. Millerf47c11e2005-06-24 20:18:35 -07003968 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003969
3970 if (unlikely(tx_bug)) {
3971 tg3_tx_recover(tp);
3972 return;
3973 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 }
3975
3976 tp->tx_cons = sw_idx;
3977
Michael Chan1b2a7202006-08-07 21:46:02 -07003978 /* Need to make the tx_cons update visible to tg3_start_xmit()
3979 * before checking for netif_queue_stopped(). Without the
3980 * memory barrier, there is a small possibility that tg3_start_xmit()
3981 * will miss it and cause the queue to be stopped forever.
3982 */
3983 smp_mb();
3984
3985 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003986 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07003987 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003988 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003989 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07003990 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07003991 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003992 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993}
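
/* Illustrative sketch, not the actual tg3_start_xmit(): the smp_mb() in
 * tg3_tx() above pairs with the smp_mb() inside tg3_tx_avail() on the
 * producer side.  After stopping the queue the producer re-reads the
 * free count, so a completion racing with the stop cannot leave the
 * queue stopped forever.
 */
#if 0	/* example only */
static void example_tx_stop_then_recheck(struct tg3 *tp, struct net_device *dev)
{
	if (tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(dev);
		/* tg3_tx_avail() issues smp_mb() before re-reading
		 * tx_cons, ordering the re-read after the queue stop.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(dev);
	}
}
#endif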
3994
3995/* Returns size of skb allocated or < 0 on error.
3996 *
3997 * We only need to fill in the address because the other members
3998 * of the RX descriptor are invariant, see tg3_init_rings.
3999 *
 4000 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 4001 * posting buffers we only dirty the first cache line of the RX
 4002 * descriptor (containing the address), whereas for the RX status
 4003 * buffers the cpu only reads the last cacheline of the RX descriptor
 4004 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4005 */
4006static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4007 int src_idx, u32 dest_idx_unmasked)
4008{
4009 struct tg3_rx_buffer_desc *desc;
4010 struct ring_info *map, *src_map;
4011 struct sk_buff *skb;
4012 dma_addr_t mapping;
4013 int skb_size, dest_idx;
4014
4015 src_map = NULL;
4016 switch (opaque_key) {
4017 case RXD_OPAQUE_RING_STD:
4018 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4019 desc = &tp->rx_std[dest_idx];
4020 map = &tp->rx_std_buffers[dest_idx];
4021 if (src_idx >= 0)
4022 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07004023 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 break;
4025
4026 case RXD_OPAQUE_RING_JUMBO:
4027 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4028 desc = &tp->rx_jumbo[dest_idx];
4029 map = &tp->rx_jumbo_buffers[dest_idx];
4030 if (src_idx >= 0)
4031 src_map = &tp->rx_jumbo_buffers[src_idx];
4032 skb_size = RX_JUMBO_PKT_BUF_SZ;
4033 break;
4034
4035 default:
4036 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004037 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038
4039 /* Do not overwrite any of the map or rp information
4040 * until we are sure we can commit to a new buffer.
4041 *
4042 * Callers depend upon this behavior and assume that
4043 * we leave everything unchanged if we fail.
4044 */
David S. Millera20e9c62006-07-31 22:38:16 -07004045 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 if (skb == NULL)
4047 return -ENOMEM;
4048
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 skb_reserve(skb, tp->rx_offset);
4050
4051 mapping = pci_map_single(tp->pdev, skb->data,
4052 skb_size - tp->rx_offset,
4053 PCI_DMA_FROMDEVICE);
4054
4055 map->skb = skb;
4056 pci_unmap_addr_set(map, mapping, mapping);
4057
4058 if (src_map != NULL)
4059 src_map->skb = NULL;
4060
4061 desc->addr_hi = ((u64)mapping >> 32);
4062 desc->addr_lo = ((u64)mapping & 0xffffffff);
4063
4064 return skb_size;
4065}
4066
4067/* We only need to move over in the address because the other
4068 * members of the RX descriptor are invariant. See notes above
4069 * tg3_alloc_rx_skb for full details.
4070 */
4071static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4072 int src_idx, u32 dest_idx_unmasked)
4073{
4074 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4075 struct ring_info *src_map, *dest_map;
4076 int dest_idx;
4077
4078 switch (opaque_key) {
4079 case RXD_OPAQUE_RING_STD:
4080 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4081 dest_desc = &tp->rx_std[dest_idx];
4082 dest_map = &tp->rx_std_buffers[dest_idx];
4083 src_desc = &tp->rx_std[src_idx];
4084 src_map = &tp->rx_std_buffers[src_idx];
4085 break;
4086
4087 case RXD_OPAQUE_RING_JUMBO:
4088 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4089 dest_desc = &tp->rx_jumbo[dest_idx];
4090 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4091 src_desc = &tp->rx_jumbo[src_idx];
4092 src_map = &tp->rx_jumbo_buffers[src_idx];
4093 break;
4094
4095 default:
4096 return;
Stephen Hemminger855e1112008-04-16 16:37:28 -07004097 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098
4099 dest_map->skb = src_map->skb;
4100 pci_unmap_addr_set(dest_map, mapping,
4101 pci_unmap_addr(src_map, mapping));
4102 dest_desc->addr_hi = src_desc->addr_hi;
4103 dest_desc->addr_lo = src_desc->addr_lo;
4104
4105 src_map->skb = NULL;
4106}
4107
4108#if TG3_VLAN_TAG_USED
4109static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4110{
4111 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4112}
4113#endif
4114
4115/* The RX ring scheme is composed of multiple rings which post fresh
4116 * buffers to the chip, and one special ring the chip uses to report
4117 * status back to the host.
4118 *
4119 * The special ring reports the status of received packets to the
 4120 * host. The chip does not write back into the original descriptor that
 4121 * the RX buffer was obtained from. The chip simply takes the original
 4122 * descriptor as provided by the host, updates the status and length
 4123 * fields, then writes this into the next status ring entry.
4124 *
4125 * Each ring the host uses to post buffers to the chip is described
 4126 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4127 * it is first placed into the on-chip ram. When the packet's length
4128 * is known, it walks down the TG3_BDINFO entries to select the ring.
4129 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4130 * which is within the range of the new packet's length is chosen.
4131 *
4132 * The "separate ring for rx status" scheme may sound queer, but it makes
4133 * sense from a cache coherency perspective. If only the host writes
4134 * to the buffer post rings, and only the chip writes to the rx status
4135 * rings, then cache lines never move beyond shared-modified state.
4136 * If both the host and chip were to write into the same ring, cache line
4137 * eviction could occur since both entities want it in an exclusive state.
4138 */
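
/* Conceptual sketch of the MAXLEN-based ring selection described above.
 * This is not driver code and the structure below is hypothetical; it
 * only illustrates how the chip walks the TG3_BDINFO entries and picks
 * the first producer ring whose MAXLEN covers the received length.
 */
#if 0	/* example only */
struct example_bdinfo {
	u32 maxlen;	/* largest frame this producer ring can hold */
	u32 ring_id;	/* e.g. RXD_OPAQUE_RING_STD or RXD_OPAQUE_RING_JUMBO */
};

static u32 example_pick_ring(const struct example_bdinfo *bd, int nrings,
			     u32 pkt_len)
{
	int i;

	for (i = 0; i < nrings; i++)
		if (pkt_len <= bd[i].maxlen)
			return bd[i].ring_id;

	/* Oversize frame: fall back to the last (largest) ring. */
	return bd[nrings - 1].ring_id;
}
#endif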
4139static int tg3_rx(struct tg3 *tp, int budget)
4140{
Michael Chanf92905d2006-06-29 20:14:29 -07004141 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07004142 u32 sw_idx = tp->rx_rcb_ptr;
4143 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144 int received;
4145
4146 hw_idx = tp->hw_status->idx[0].rx_producer;
4147 /*
4148 * We need to order the read of hw_idx and the read of
4149 * the opaque cookie.
4150 */
4151 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152 work_mask = 0;
4153 received = 0;
4154 while (sw_idx != hw_idx && budget > 0) {
4155 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4156 unsigned int len;
4157 struct sk_buff *skb;
4158 dma_addr_t dma_addr;
4159 u32 opaque_key, desc_idx, *post_ptr;
4160
4161 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4162 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4163 if (opaque_key == RXD_OPAQUE_RING_STD) {
4164 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4165 mapping);
4166 skb = tp->rx_std_buffers[desc_idx].skb;
4167 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07004168 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4170 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4171 mapping);
4172 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4173 post_ptr = &tp->rx_jumbo_ptr;
4174 }
4175 else {
4176 goto next_pkt_nopost;
4177 }
4178
4179 work_mask |= opaque_key;
4180
4181 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4182 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4183 drop_it:
4184 tg3_recycle_rx(tp, opaque_key,
4185 desc_idx, *post_ptr);
4186 drop_it_no_recycle:
4187 /* Other statistics kept track of by card. */
4188 tp->net_stats.rx_dropped++;
4189 goto next_pkt;
4190 }
4191
4192 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4193
Jeff Garzik6aa20a22006-09-13 13:24:59 -04004194 if (len > RX_COPY_THRESHOLD
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195 && tp->rx_offset == 2
4196 /* rx_offset != 2 iff this is a 5701 card running
4197 * in PCI-X mode [see tg3_get_invariants()] */
4198 ) {
4199 int skb_size;
4200
4201 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4202 desc_idx, *post_ptr);
4203 if (skb_size < 0)
4204 goto drop_it;
4205
4206 pci_unmap_single(tp->pdev, dma_addr,
4207 skb_size - tp->rx_offset,
4208 PCI_DMA_FROMDEVICE);
4209
4210 skb_put(skb, len);
4211 } else {
4212 struct sk_buff *copy_skb;
4213
4214 tg3_recycle_rx(tp, opaque_key,
4215 desc_idx, *post_ptr);
4216
David S. Millera20e9c62006-07-31 22:38:16 -07004217 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 if (copy_skb == NULL)
4219 goto drop_it_no_recycle;
4220
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 skb_reserve(copy_skb, 2);
4222 skb_put(copy_skb, len);
4223 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03004224 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4226
4227 /* We'll reuse the original ring buffer. */
4228 skb = copy_skb;
4229 }
4230
4231 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4232 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4233 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4234 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4235 skb->ip_summed = CHECKSUM_UNNECESSARY;
4236 else
4237 skb->ip_summed = CHECKSUM_NONE;
4238
4239 skb->protocol = eth_type_trans(skb, tp->dev);
4240#if TG3_VLAN_TAG_USED
4241 if (tp->vlgrp != NULL &&
4242 desc->type_flags & RXD_FLAG_VLAN) {
4243 tg3_vlan_rx(tp, skb,
4244 desc->err_vlan & RXD_VLAN_MASK);
4245 } else
4246#endif
4247 netif_receive_skb(skb);
4248
4249 tp->dev->last_rx = jiffies;
4250 received++;
4251 budget--;
4252
4253next_pkt:
4254 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07004255
4256 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4257 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4258
4259 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4260 TG3_64BIT_REG_LOW, idx);
4261 work_mask &= ~RXD_OPAQUE_RING_STD;
4262 rx_std_posted = 0;
4263 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07004265 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08004266 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07004267
4268 /* Refresh hw_idx to see if there is new work */
4269 if (sw_idx == hw_idx) {
4270 hw_idx = tp->hw_status->idx[0].rx_producer;
4271 rmb();
4272 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 }
4274
4275 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07004276 tp->rx_rcb_ptr = sw_idx;
4277 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278
4279 /* Refill RX ring(s). */
4280 if (work_mask & RXD_OPAQUE_RING_STD) {
4281 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4282 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4283 sw_idx);
4284 }
4285 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4286 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4287 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4288 sw_idx);
4289 }
4290 mmiowb();
4291
4292 return received;
4293}
4294
David S. Miller6f535762007-10-11 18:08:29 -07004295static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299 /* handle link change and other phy events */
4300 if (!(tp->tg3_flags &
4301 (TG3_FLAG_USE_LINKCHG_REG |
4302 TG3_FLAG_POLL_SERDES))) {
4303 if (sblk->status & SD_STATUS_LINK_CHG) {
4304 sblk->status = SD_STATUS_UPDATED |
4305 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07004306 spin_lock(&tp->lock);
Matt Carlsondd477002008-05-25 23:45:58 -07004307 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4308 tw32_f(MAC_STATUS,
4309 (MAC_STATUS_SYNC_CHANGED |
4310 MAC_STATUS_CFG_CHANGED |
4311 MAC_STATUS_MI_COMPLETION |
4312 MAC_STATUS_LNKSTATE_CHANGED));
4313 udelay(40);
4314 } else
4315 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07004316 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317 }
4318 }
4319
4320 /* run TX completion thread */
4321 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07004323 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07004324 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 }
4326
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327 /* run RX thread, within the bounds set by NAPI.
4328 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004329 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004331 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07004332 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333
David S. Miller6f535762007-10-11 18:08:29 -07004334 return work_done;
4335}
David S. Millerf7383c22005-05-18 22:50:53 -07004336
David S. Miller6f535762007-10-11 18:08:29 -07004337static int tg3_poll(struct napi_struct *napi, int budget)
4338{
4339 struct tg3 *tp = container_of(napi, struct tg3, napi);
4340 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07004341 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07004342
4343 while (1) {
4344 work_done = tg3_poll_work(tp, work_done, budget);
4345
4346 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4347 goto tx_recovery;
4348
4349 if (unlikely(work_done >= budget))
4350 break;
4351
Michael Chan4fd7ab52007-10-12 01:39:50 -07004352 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4353 /* tp->last_tag is used in tg3_restart_ints() below
4354 * to tell the hw how much work has been processed,
4355 * so we must read it before checking for more work.
4356 */
4357 tp->last_tag = sblk->status_tag;
4358 rmb();
4359 } else
4360 sblk->status &= ~SD_STATUS_UPDATED;
4361
David S. Miller6f535762007-10-11 18:08:29 -07004362 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07004363 netif_rx_complete(tp->dev, napi);
4364 tg3_restart_ints(tp);
4365 break;
4366 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 }
4368
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004369 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07004370
4371tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07004372 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07004373 netif_rx_complete(tp->dev, napi);
4374 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07004375 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376}
4377
David S. Millerf47c11e2005-06-24 20:18:35 -07004378static void tg3_irq_quiesce(struct tg3 *tp)
4379{
4380 BUG_ON(tp->irq_sync);
4381
4382 tp->irq_sync = 1;
4383 smp_mb();
4384
4385 synchronize_irq(tp->pdev->irq);
4386}
4387
4388static inline int tg3_irq_sync(struct tg3 *tp)
4389{
4390 return tp->irq_sync;
4391}
4392
 4393/* Fully shut down all tg3 driver activity elsewhere in the system.
 4394 * If irq_sync is non-zero, then the IRQ handler is synchronized with
 4395 * as well. Most of the time this is not necessary, except when
4396 * shutting down the device.
4397 */
4398static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4399{
Michael Chan46966542007-07-11 19:47:19 -07004400 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07004401 if (irq_sync)
4402 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004403}
4404
4405static inline void tg3_full_unlock(struct tg3 *tp)
4406{
David S. Millerf47c11e2005-06-24 20:18:35 -07004407 spin_unlock_bh(&tp->lock);
4408}
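
/* Illustrative sketch, not a specific driver path: a typical caller that
 * reconfigures state shared with the interrupt handler passes irq_sync = 1
 * so tg3_irq_quiesce() runs before the change is made.
 */
#if 0	/* example only */
static void example_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* irq_sync = 1: also wait out the ISR */
	/* ... modify state that the interrupt handler also touches ... */
	tg3_full_unlock(tp);
}
#endif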
4409
Michael Chanfcfa0a32006-03-20 22:28:41 -08004410/* One-shot MSI handler - Chip automatically disables interrupt
 4411 * after sending the MSI, so the driver doesn't have to do it.
4412 */
David Howells7d12e782006-10-05 14:55:46 +01004413static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08004414{
4415 struct net_device *dev = dev_id;
4416 struct tg3 *tp = netdev_priv(dev);
4417
4418 prefetch(tp->hw_status);
4419 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4420
4421 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004422 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08004423
4424 return IRQ_HANDLED;
4425}
4426
Michael Chan88b06bc2005-04-21 17:13:25 -07004427/* MSI ISR - No need to check for interrupt sharing and no need to
4428 * flush status block and interrupt mailbox. PCI ordering rules
4429 * guarantee that MSI will arrive after the status block.
4430 */
David Howells7d12e782006-10-05 14:55:46 +01004431static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07004432{
4433 struct net_device *dev = dev_id;
4434 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07004435
Michael Chan61487482005-09-05 17:53:19 -07004436 prefetch(tp->hw_status);
4437 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07004438 /*
David S. Millerfac9b832005-05-18 22:46:34 -07004439 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07004440 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07004441	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07004442 * NIC to stop sending us irqs, engaging "in-intr-handler"
4443 * event coalescing.
4444 */
4445 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07004446 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004447 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07004448
Michael Chan88b06bc2005-04-21 17:13:25 -07004449 return IRQ_RETVAL(1);
4450}
4451
David Howells7d12e782006-10-05 14:55:46 +01004452static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453{
4454 struct net_device *dev = dev_id;
4455 struct tg3 *tp = netdev_priv(dev);
4456 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 unsigned int handled = 1;
4458
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459 /* In INTx mode, it is possible for the interrupt to arrive at
 4460	 * the CPU before the status block posted prior to the interrupt is visible.
4461 * Reading the PCI State register will confirm whether the
4462 * interrupt is ours and will flush the status block.
4463 */
Michael Chand18edcb2007-03-24 20:57:11 -07004464 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4465 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4466 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4467 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004468 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07004469 }
Michael Chand18edcb2007-03-24 20:57:11 -07004470 }
4471
4472 /*
4473 * Writing any value to intr-mbox-0 clears PCI INTA# and
4474 * chip-internal interrupt pending events.
 4475	 * Writing non-zero to intr-mbox-0 additionally tells the
4476 * NIC to stop sending us irqs, engaging "in-intr-handler"
4477 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004478 *
4479 * Flush the mailbox to de-assert the IRQ immediately to prevent
4480 * spurious interrupts. The flush impacts performance but
4481 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004482 */
Michael Chanc04cb342007-05-07 00:26:15 -07004483 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004484 if (tg3_irq_sync(tp))
4485 goto out;
4486 sblk->status &= ~SD_STATUS_UPDATED;
4487 if (likely(tg3_has_work(tp))) {
4488 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004489 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004490 } else {
4491 /* No work, shared interrupt perhaps? re-enable
4492 * interrupts, and flush that PCI write
4493 */
4494 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4495 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004496 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004497out:
David S. Millerfac9b832005-05-18 22:46:34 -07004498 return IRQ_RETVAL(handled);
4499}
4500
David Howells7d12e782006-10-05 14:55:46 +01004501static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004502{
4503 struct net_device *dev = dev_id;
4504 struct tg3 *tp = netdev_priv(dev);
4505 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004506 unsigned int handled = 1;
4507
David S. Millerfac9b832005-05-18 22:46:34 -07004508 /* In INTx mode, it is possible for the interrupt to arrive at
 4509	 * the CPU before the status block posted prior to the interrupt is visible.
4510 * Reading the PCI State register will confirm whether the
4511 * interrupt is ours and will flush the status block.
4512 */
Michael Chand18edcb2007-03-24 20:57:11 -07004513 if (unlikely(sblk->status_tag == tp->last_tag)) {
4514 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4515 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4516 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004517 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518 }
Michael Chand18edcb2007-03-24 20:57:11 -07004519 }
4520
4521 /*
 4522	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 4523	 * chip-internal interrupt pending events.
 4524	 * Writing non-zero to intr-mbox-0 additionally tells the
4525 * NIC to stop sending us irqs, engaging "in-intr-handler"
4526 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004527 *
4528 * Flush the mailbox to de-assert the IRQ immediately to prevent
4529 * spurious interrupts. The flush impacts performance but
4530 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004531 */
Michael Chanc04cb342007-05-07 00:26:15 -07004532 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004533 if (tg3_irq_sync(tp))
4534 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004535 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004536 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4537 /* Update last_tag to mark that this status has been
 4538	 * seen. Because the interrupt may be shared, we may be
4539 * racing with tg3_poll(), so only update last_tag
4540 * if tg3_poll() is not scheduled.
4541 */
4542 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004543 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004545out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 return IRQ_RETVAL(handled);
4547}
4548
Michael Chan79381092005-04-21 17:13:59 -07004549/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004550static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004551{
4552 struct net_device *dev = dev_id;
4553 struct tg3 *tp = netdev_priv(dev);
4554 struct tg3_hw_status *sblk = tp->hw_status;
4555
Michael Chanf9804dd2005-09-27 12:13:10 -07004556 if ((sblk->status & SD_STATUS_UPDATED) ||
4557 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004558 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004559 return IRQ_RETVAL(1);
4560 }
4561 return IRQ_RETVAL(0);
4562}
4563
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004564static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004565static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566
Michael Chanb9ec6c12006-07-25 16:37:27 -07004567/* Restart hardware after configuration changes, self-test, etc.
4568 * Invoked with tp->lock held.
4569 */
4570static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004571 __releases(tp->lock)
4572 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004573{
4574 int err;
4575
4576 err = tg3_init_hw(tp, reset_phy);
4577 if (err) {
4578 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4579 "aborting.\n", tp->dev->name);
4580 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4581 tg3_full_unlock(tp);
4582 del_timer_sync(&tp->timer);
4583 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004584 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004585 dev_close(tp->dev);
4586 tg3_full_lock(tp, 0);
4587 }
4588 return err;
4589}
4590
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591#ifdef CONFIG_NET_POLL_CONTROLLER
4592static void tg3_poll_controller(struct net_device *dev)
4593{
Michael Chan88b06bc2005-04-21 17:13:25 -07004594 struct tg3 *tp = netdev_priv(dev);
4595
David Howells7d12e782006-10-05 14:55:46 +01004596 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597}
4598#endif
4599
David Howellsc4028952006-11-22 14:57:56 +00004600static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601{
David Howellsc4028952006-11-22 14:57:56 +00004602 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004603 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604 unsigned int restart_timer;
4605
Michael Chan7faa0062006-02-02 17:29:28 -08004606 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004607
4608 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004609 tg3_full_unlock(tp);
4610 return;
4611 }
4612
4613 tg3_full_unlock(tp);
4614
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004615 tg3_phy_stop(tp);
4616
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617 tg3_netif_stop(tp);
4618
David S. Millerf47c11e2005-06-24 20:18:35 -07004619 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620
4621 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4622 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4623
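	/* A pending TX recovery also switches the mailbox write methods and
	 * enables the mailbox write-reorder workaround before the chip is
	 * re-initialized below.
	 */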
Michael Chandf3e6542006-05-26 17:48:07 -07004624 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4625 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4626 tp->write32_rx_mbox = tg3_write_flush_reg32;
4627 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4628 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4629 }
4630
Michael Chan944d9802005-05-29 14:57:48 -07004631 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004632 err = tg3_init_hw(tp, 1);
4633 if (err)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004634 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635
4636 tg3_netif_start(tp);
4637
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638 if (restart_timer)
4639 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004640
Michael Chanb9ec6c12006-07-25 16:37:27 -07004641out:
Michael Chan7faa0062006-02-02 17:29:28 -08004642 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07004643
4644 if (!err)
4645 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646}
4647
Michael Chanb0408752007-02-13 12:18:30 -08004648static void tg3_dump_short_state(struct tg3 *tp)
4649{
4650 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4651 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4652 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4653 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4654}
4655
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656static void tg3_tx_timeout(struct net_device *dev)
4657{
4658 struct tg3 *tp = netdev_priv(dev);
4659
Michael Chanb0408752007-02-13 12:18:30 -08004660 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004661 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4662 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004663 tg3_dump_short_state(tp);
4664 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665
4666 schedule_work(&tp->reset_task);
4667}
4668
Michael Chanc58ec932005-09-17 00:46:27 -07004669/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4670static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4671{
4672 u32 base = (u32) mapping & 0xffffffff;
4673
4674 return ((base > 0xffffdcc0) &&
4675 (base + len + 8 < base));
4676}
4677
Michael Chan72f2afb2006-03-06 19:28:35 -08004678/* Test for DMA addresses > 40-bit */
4679static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4680 int len)
4681{
4682#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004683 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004684 return (((u64) mapping + len) > DMA_40BIT_MASK);
4685 return 0;
4686#else
4687 return 0;
4688#endif
4689}
4690
Linus Torvalds1da177e2005-04-16 15:20:36 -07004691static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4692
Michael Chan72f2afb2006-03-06 19:28:35 -08004693/* Workaround 4GB and 40-bit hardware DMA bugs. */
4694static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004695 u32 last_plus_one, u32 *start,
4696 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697{
Matt Carlson41588ba2008-04-19 18:12:33 -07004698 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004699 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004701 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702
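	/* Work around the bugs by collapsing the skb into a single, freshly
	 * allocated linear copy (with extra headroom on 5701 so the data is
	 * 4-byte aligned), remapping it, and rewriting the descriptors.
	 */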
Matt Carlson41588ba2008-04-19 18:12:33 -07004703 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4704 new_skb = skb_copy(skb, GFP_ATOMIC);
4705 else {
4706 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4707
4708 new_skb = skb_copy_expand(skb,
4709 skb_headroom(skb) + more_headroom,
4710 skb_tailroom(skb), GFP_ATOMIC);
4711 }
4712
Linus Torvalds1da177e2005-04-16 15:20:36 -07004713 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004714 ret = -1;
4715 } else {
4716 /* New SKB is guaranteed to be linear. */
4717 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004718 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4719 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4720
Michael Chanc58ec932005-09-17 00:46:27 -07004721 /* Make sure new skb does not cross any 4G boundaries.
4722 * Drop the packet if it does.
4723 */
David S. Miller90079ce2008-09-11 04:52:51 -07004724 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004725 if (!ret)
4726 skb_dma_unmap(&tp->pdev->dev, new_skb,
4727 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004728 ret = -1;
4729 dev_kfree_skb(new_skb);
4730 new_skb = NULL;
4731 } else {
4732 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4733 base_flags, 1 | (mss << 1));
4734 *start = NEXT_TX(entry);
4735 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736 }
4737
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738 /* Now clean up the sw ring entries. */
4739 i = 0;
4740 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 if (i == 0) {
4742 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 } else {
4744 tp->tx_buffers[entry].skb = NULL;
4745 }
4746 entry = NEXT_TX(entry);
4747 i++;
4748 }
4749
David S. Miller90079ce2008-09-11 04:52:51 -07004750 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751 dev_kfree_skb(skb);
4752
Michael Chanc58ec932005-09-17 00:46:27 -07004753 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754}
4755
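/* Fill one hardware TX descriptor.  mss_and_is_end packs the MSS in the
 * upper bits and the "last fragment" flag in bit 0, as decoded below.
 */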
4756static void tg3_set_txd(struct tg3 *tp, int entry,
4757 dma_addr_t mapping, int len, u32 flags,
4758 u32 mss_and_is_end)
4759{
4760 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4761 int is_end = (mss_and_is_end & 0x1);
4762 u32 mss = (mss_and_is_end >> 1);
4763 u32 vlan_tag = 0;
4764
4765 if (is_end)
4766 flags |= TXD_FLAG_END;
4767 if (flags & TXD_FLAG_VLAN) {
4768 vlan_tag = flags >> 16;
4769 flags &= 0xffff;
4770 }
4771 vlan_tag |= (mss << TXD_MSS_SHIFT);
4772
4773 txd->addr_hi = ((u64) mapping >> 32);
4774 txd->addr_lo = ((u64) mapping & 0xffffffff);
4775 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4776 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4777}
4778
Michael Chan5a6f3072006-03-20 22:28:05 -08004779/* hard_start_xmit for devices that don't have any bugs and
4780 * support TG3_FLG2_HW_TSO_2 only.
4781 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4783{
4784 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004786 struct skb_shared_info *sp;
4787 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004788
4789 len = skb_headlen(skb);
4790
Michael Chan00b70502006-06-17 21:58:45 -07004791 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004792 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004793 * interrupt. Furthermore, IRQ processing runs lockless so we have
4794 * no IRQ context deadlocks to worry about either. Rejoice!
4795 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004796 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004797 if (!netif_queue_stopped(dev)) {
4798 netif_stop_queue(dev);
4799
4800 /* This is a hard error, log it. */
4801 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4802 "queue awake!\n", dev->name);
4803 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004804 return NETDEV_TX_BUSY;
4805 }
4806
4807 entry = tp->tx_prod;
4808 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004809 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004810 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004811 int tcp_opt_len, ip_tcp_len;
4812
4813 if (skb_header_cloned(skb) &&
4814 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4815 dev_kfree_skb(skb);
4816 goto out_unlock;
4817 }
4818
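		/* The total L3+L4 header length is folded into the upper bits
		 * of the MSS value (shifted left by 9): derived from the
		 * linear headlen for TCPv6, and from the IP and TCP header
		 * sizes for IPv4.
		 */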
Michael Chanb0026622006-07-03 19:42:14 -07004819 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4820 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4821 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004822 struct iphdr *iph = ip_hdr(skb);
4823
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004824 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004825 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004826
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004827 iph->check = 0;
4828 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004829 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4830 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004831
4832 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4833 TXD_FLAG_CPU_POST_DMA);
4834
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004835 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004836
Michael Chan5a6f3072006-03-20 22:28:05 -08004837 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004838 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004839 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004840#if TG3_VLAN_TAG_USED
4841 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4842 base_flags |= (TXD_FLAG_VLAN |
4843 (vlan_tx_tag_get(skb) << 16));
4844#endif
4845
David S. Miller90079ce2008-09-11 04:52:51 -07004846 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4847 dev_kfree_skb(skb);
4848 goto out_unlock;
4849 }
4850
4851 sp = skb_shinfo(skb);
4852
4853 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004854
4855 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004856
4857 tg3_set_txd(tp, entry, mapping, len, base_flags,
4858 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4859
4860 entry = NEXT_TX(entry);
4861
4862 /* Now loop through additional data fragments, and queue them. */
4863 if (skb_shinfo(skb)->nr_frags > 0) {
4864 unsigned int i, last;
4865
4866 last = skb_shinfo(skb)->nr_frags - 1;
4867 for (i = 0; i <= last; i++) {
4868 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4869
4870 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004871 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004872 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004873
4874 tg3_set_txd(tp, entry, mapping, len,
4875 base_flags, (i == last) | (mss << 1));
4876
4877 entry = NEXT_TX(entry);
4878 }
4879 }
4880
4881 /* Packets are ready, update Tx producer idx local and on card. */
4882 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4883
4884 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004885 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004886 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004887 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004888 netif_wake_queue(tp->dev);
4889 }
4890
4891out_unlock:
4892 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004893
4894 dev->trans_start = jiffies;
4895
4896 return NETDEV_TX_OK;
4897}
4898
Michael Chan52c0fd82006-06-29 20:15:54 -07004899static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4900
4901/* Use GSO to workaround a rare TSO bug that may be triggered when the
4902 * TSO header is greater than 80 bytes.
4903 */
4904static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4905{
4906 struct sk_buff *segs, *nskb;
4907
4908 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004909 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004910 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004911 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4912 return NETDEV_TX_BUSY;
4913
4914 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004915 }
4916
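	/* Segment the packet in software with TSO masked off, then send each
	 * resulting skb through the normal bug-aware transmit path.
	 */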
4917 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004918 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004919 goto tg3_tso_bug_end;
4920
4921 do {
4922 nskb = segs;
4923 segs = segs->next;
4924 nskb->next = NULL;
4925 tg3_start_xmit_dma_bug(nskb, tp->dev);
4926 } while (segs);
4927
4928tg3_tso_bug_end:
4929 dev_kfree_skb(skb);
4930
4931 return NETDEV_TX_OK;
4932}
Michael Chan52c0fd82006-06-29 20:15:54 -07004933
Michael Chan5a6f3072006-03-20 22:28:05 -08004934/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4935 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4936 */
4937static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4938{
4939 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004940 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004941 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004943 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944
4945 len = skb_headlen(skb);
4946
Michael Chan00b70502006-06-17 21:58:45 -07004947 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004948 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004949 * interrupt. Furthermore, IRQ processing runs lockless so we have
4950 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004952 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004953 if (!netif_queue_stopped(dev)) {
4954 netif_stop_queue(dev);
4955
4956 /* This is a hard error, log it. */
4957 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4958 "queue awake!\n", dev->name);
4959 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004960 return NETDEV_TX_BUSY;
4961 }
4962
4963 entry = tp->tx_prod;
4964 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004965 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004968 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004969 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07004970 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971
4972 if (skb_header_cloned(skb) &&
4973 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4974 dev_kfree_skb(skb);
4975 goto out_unlock;
4976 }
4977
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004978 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004979 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980
Michael Chan52c0fd82006-06-29 20:15:54 -07004981 hdr_len = ip_tcp_len + tcp_opt_len;
4982 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08004983 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07004984 return (tg3_tso_bug(tp, skb));
4985
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4987 TXD_FLAG_CPU_POST_DMA);
4988
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004989 iph = ip_hdr(skb);
4990 iph->check = 0;
4991 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004993 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004995 } else
4996 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4997 iph->daddr, 0,
4998 IPPROTO_TCP,
4999 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000
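		/* If the headers carry IP or TCP options, pass the extra
		 * option-word count to the chip: HW_TSO parts and the 5705
		 * take it in the MSS field, older parts in the descriptor
		 * flags.
		 */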
5001 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5002 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005003 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005004 int tsflags;
5005
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005006 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 mss |= (tsflags << 11);
5008 }
5009 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005010 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005011 int tsflags;
5012
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07005013 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014 base_flags |= tsflags << 12;
5015 }
5016 }
5017 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018#if TG3_VLAN_TAG_USED
5019 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5020 base_flags |= (TXD_FLAG_VLAN |
5021 (vlan_tx_tag_get(skb) << 16));
5022#endif
5023
David S. Miller90079ce2008-09-11 04:52:51 -07005024 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5025 dev_kfree_skb(skb);
5026 goto out_unlock;
5027 }
5028
5029 sp = skb_shinfo(skb);
5030
5031 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032
5033 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005034
5035 would_hit_hwbug = 0;
5036
Matt Carlson41588ba2008-04-19 18:12:33 -07005037 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5038 would_hit_hwbug = 1;
5039 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07005040 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041
5042 tg3_set_txd(tp, entry, mapping, len, base_flags,
5043 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5044
5045 entry = NEXT_TX(entry);
5046
5047 /* Now loop through additional data fragments, and queue them. */
5048 if (skb_shinfo(skb)->nr_frags > 0) {
5049 unsigned int i, last;
5050
5051 last = skb_shinfo(skb)->nr_frags - 1;
5052 for (i = 0; i <= last; i++) {
5053 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5054
5055 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07005056 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057
5058 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059
Michael Chanc58ec932005-09-17 00:46:27 -07005060 if (tg3_4g_overflow_test(mapping, len))
5061 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062
Michael Chan72f2afb2006-03-06 19:28:35 -08005063 if (tg3_40bit_overflow_test(tp, mapping, len))
5064 would_hit_hwbug = 1;
5065
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5067 tg3_set_txd(tp, entry, mapping, len,
5068 base_flags, (i == last)|(mss << 1));
5069 else
5070 tg3_set_txd(tp, entry, mapping, len,
5071 base_flags, (i == last));
5072
5073 entry = NEXT_TX(entry);
5074 }
5075 }
5076
5077 if (would_hit_hwbug) {
5078 u32 last_plus_one = entry;
5079 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080
Michael Chanc58ec932005-09-17 00:46:27 -07005081 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5082 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083
5084 /* If the workaround fails due to memory/mapping
5085 * failure, silently drop this packet.
5086 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005087 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005088 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005089 goto out_unlock;
5090
5091 entry = start;
5092 }
5093
5094 /* Packets are ready, update Tx producer idx local and on card. */
5095 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5096
5097 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005098 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005100 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005101 netif_wake_queue(tp->dev);
5102 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103
5104out_unlock:
5105 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005106
5107 dev->trans_start = jiffies;
5108
5109 return NETDEV_TX_OK;
5110}
5111
5112static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5113 int new_mtu)
5114{
5115 dev->mtu = new_mtu;
5116
Michael Chanef7f5ec2005-07-25 12:32:25 -07005117 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005118 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005119 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5120 ethtool_op_set_tso(dev, 0);
5121 }
5122 else
5123 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5124 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005125 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005126 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005127 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005128 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129}
5130
5131static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5132{
5133 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005134 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135
5136 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5137 return -EINVAL;
5138
5139 if (!netif_running(dev)) {
5140 /* We'll just catch it later when the
5141 * device is up'd.
5142 */
5143 tg3_set_mtu(dev, tp, new_mtu);
5144 return 0;
5145 }
5146
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005147 tg3_phy_stop(tp);
5148
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005150
5151 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152
Michael Chan944d9802005-05-29 14:57:48 -07005153 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154
5155 tg3_set_mtu(dev, tp, new_mtu);
5156
Michael Chanb9ec6c12006-07-25 16:37:27 -07005157 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005158
Michael Chanb9ec6c12006-07-25 16:37:27 -07005159 if (!err)
5160 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161
David S. Millerf47c11e2005-06-24 20:18:35 -07005162 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005164 if (!err)
5165 tg3_phy_start(tp);
5166
Michael Chanb9ec6c12006-07-25 16:37:27 -07005167 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168}
5169
5170/* Free up pending packets in all rx/tx rings.
5171 *
5172 * The chip has been shut down and the driver detached from
5173 * the networking core, so no interrupts or new tx packets will
5174 * end up in the driver. tp->{tx,}lock is not held and we are not
5175 * in an interrupt context and thus may sleep.
5176 */
5177static void tg3_free_rings(struct tg3 *tp)
5178{
5179 struct ring_info *rxp;
5180 int i;
5181
5182 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5183 rxp = &tp->rx_std_buffers[i];
5184
5185 if (rxp->skb == NULL)
5186 continue;
5187 pci_unmap_single(tp->pdev,
5188 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005189 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 PCI_DMA_FROMDEVICE);
5191 dev_kfree_skb_any(rxp->skb);
5192 rxp->skb = NULL;
5193 }
5194
5195 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5196 rxp = &tp->rx_jumbo_buffers[i];
5197
5198 if (rxp->skb == NULL)
5199 continue;
5200 pci_unmap_single(tp->pdev,
5201 pci_unmap_addr(rxp, mapping),
5202 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5203 PCI_DMA_FROMDEVICE);
5204 dev_kfree_skb_any(rxp->skb);
5205 rxp->skb = NULL;
5206 }
5207
5208 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5209 struct tx_ring_info *txp;
5210 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211
5212 txp = &tp->tx_buffers[i];
5213 skb = txp->skb;
5214
5215 if (skb == NULL) {
5216 i++;
5217 continue;
5218 }
5219
David S. Miller90079ce2008-09-11 04:52:51 -07005220 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5221
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222 txp->skb = NULL;
5223
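		/* The linear part plus each page fragment consumed one TX
		 * ring entry apiece, so skip past all of them.
		 */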
David S. Miller90079ce2008-09-11 04:52:51 -07005224 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225
5226 dev_kfree_skb_any(skb);
5227 }
5228}
5229
5230/* Initialize tx/rx rings for packet processing.
5231 *
5232 * The chip has been shut down and the driver detached from
5233 * the networking core, so no interrupts or new tx packets will
5234 * end up in the driver. tp->{tx,}lock are held and thus
5235 * we may not sleep.
5236 */
Michael Chan32d8c572006-07-25 16:38:29 -07005237static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238{
5239 u32 i;
5240
5241 /* Free up all the SKBs. */
5242 tg3_free_rings(tp);
5243
5244 /* Zero out all descriptors. */
5245 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5246 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5247 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5248 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5249
Michael Chan7e72aad2005-07-25 12:31:17 -07005250 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005251 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005252 (tp->dev->mtu > ETH_DATA_LEN))
5253 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5254
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255	/* Initialize invariants of the rings; we only set this
5256 * stuff once. This works because the card does not
5257 * write into the rx buffer posting rings.
5258 */
5259 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5260 struct tg3_rx_buffer_desc *rxd;
5261
5262 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005263 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264 << RXD_LEN_SHIFT;
5265 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5266 rxd->opaque = (RXD_OPAQUE_RING_STD |
5267 (i << RXD_OPAQUE_INDEX_SHIFT));
5268 }
5269
Michael Chan0f893dc2005-07-25 12:30:38 -07005270 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5272 struct tg3_rx_buffer_desc *rxd;
5273
5274 rxd = &tp->rx_jumbo[i];
5275 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5276 << RXD_LEN_SHIFT;
5277 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5278 RXD_FLAG_JUMBO;
5279 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5280 (i << RXD_OPAQUE_INDEX_SHIFT));
5281 }
5282 }
5283
5284 /* Now allocate fresh SKBs for each rx ring. */
5285 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005286 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5287 printk(KERN_WARNING PFX
5288 "%s: Using a smaller RX standard ring, "
5289 "only %d out of %d buffers were allocated "
5290 "successfully.\n",
5291 tp->dev->name, i, tp->rx_pending);
5292 if (i == 0)
5293 return -ENOMEM;
5294 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005296 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005297 }
5298
Michael Chan0f893dc2005-07-25 12:30:38 -07005299 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5301 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005302 -1, i) < 0) {
5303 printk(KERN_WARNING PFX
5304 "%s: Using a smaller RX jumbo ring, "
5305 "only %d out of %d buffers were "
5306 "allocated successfully.\n",
5307 tp->dev->name, i, tp->rx_jumbo_pending);
5308 if (i == 0) {
5309 tg3_free_rings(tp);
5310 return -ENOMEM;
5311 }
5312 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005314 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 }
5316 }
Michael Chan32d8c572006-07-25 16:38:29 -07005317 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318}
5319
5320/*
5321 * Must not be invoked with interrupt sources disabled and
5322 * the hardware shut down.
5323 */
5324static void tg3_free_consistent(struct tg3 *tp)
5325{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005326 kfree(tp->rx_std_buffers);
5327 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 if (tp->rx_std) {
5329 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5330 tp->rx_std, tp->rx_std_mapping);
5331 tp->rx_std = NULL;
5332 }
5333 if (tp->rx_jumbo) {
5334 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5335 tp->rx_jumbo, tp->rx_jumbo_mapping);
5336 tp->rx_jumbo = NULL;
5337 }
5338 if (tp->rx_rcb) {
5339 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5340 tp->rx_rcb, tp->rx_rcb_mapping);
5341 tp->rx_rcb = NULL;
5342 }
5343 if (tp->tx_ring) {
5344 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5345 tp->tx_ring, tp->tx_desc_mapping);
5346 tp->tx_ring = NULL;
5347 }
5348 if (tp->hw_status) {
5349 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5350 tp->hw_status, tp->status_mapping);
5351 tp->hw_status = NULL;
5352 }
5353 if (tp->hw_stats) {
5354 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5355 tp->hw_stats, tp->stats_mapping);
5356 tp->hw_stats = NULL;
5357 }
5358}
5359
5360/*
5361 * Must not be invoked with interrupt sources disabled and
5362 * the hardware shut down. Can sleep.
5363 */
5364static int tg3_alloc_consistent(struct tg3 *tp)
5365{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005366 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367 (TG3_RX_RING_SIZE +
5368 TG3_RX_JUMBO_RING_SIZE)) +
5369 (sizeof(struct tx_ring_info) *
5370 TG3_TX_RING_SIZE),
5371 GFP_KERNEL);
5372 if (!tp->rx_std_buffers)
5373 return -ENOMEM;
5374
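	/* A single allocation backs all three software ring-info arrays;
	 * the jumbo RX and TX pointers are carved out of it just below.
	 */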
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5376 tp->tx_buffers = (struct tx_ring_info *)
5377 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5378
5379 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5380 &tp->rx_std_mapping);
5381 if (!tp->rx_std)
5382 goto err_out;
5383
5384 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5385 &tp->rx_jumbo_mapping);
5386
5387 if (!tp->rx_jumbo)
5388 goto err_out;
5389
5390 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5391 &tp->rx_rcb_mapping);
5392 if (!tp->rx_rcb)
5393 goto err_out;
5394
5395 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5396 &tp->tx_desc_mapping);
5397 if (!tp->tx_ring)
5398 goto err_out;
5399
5400 tp->hw_status = pci_alloc_consistent(tp->pdev,
5401 TG3_HW_STATUS_SIZE,
5402 &tp->status_mapping);
5403 if (!tp->hw_status)
5404 goto err_out;
5405
5406 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5407 sizeof(struct tg3_hw_stats),
5408 &tp->stats_mapping);
5409 if (!tp->hw_stats)
5410 goto err_out;
5411
5412 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5413 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5414
5415 return 0;
5416
5417err_out:
5418 tg3_free_consistent(tp);
5419 return -ENOMEM;
5420}
5421
5422#define MAX_WAIT_CNT 1000
5423
5424/* To stop a block, clear the enable bit and poll till it
5425 * clears. tp->lock is held.
5426 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005427static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428{
5429 unsigned int i;
5430 u32 val;
5431
5432 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5433 switch (ofs) {
5434 case RCVLSC_MODE:
5435 case DMAC_MODE:
5436 case MBFREE_MODE:
5437 case BUFMGR_MODE:
5438 case MEMARB_MODE:
5439 /* We can't enable/disable these bits of the
5440 * 5705/5750, just say success.
5441 */
5442 return 0;
5443
5444 default:
5445 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447 }
5448
5449 val = tr32(ofs);
5450 val &= ~enable_bit;
5451 tw32_f(ofs, val);
5452
5453 for (i = 0; i < MAX_WAIT_CNT; i++) {
5454 udelay(100);
5455 val = tr32(ofs);
5456 if ((val & enable_bit) == 0)
5457 break;
5458 }
5459
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005460 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005461 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5462 "ofs=%lx enable_bit=%x\n",
5463 ofs, enable_bit);
5464 return -ENODEV;
5465 }
5466
5467 return 0;
5468}
5469
5470/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005471static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472{
5473 int i, err;
5474
5475 tg3_disable_ints(tp);
5476
5477 tp->rx_mode &= ~RX_MODE_ENABLE;
5478 tw32_f(MAC_RX_MODE, tp->rx_mode);
5479 udelay(10);
5480
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005481 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5482 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5483 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5484 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5485 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5486 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005487
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005488 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5489 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5490 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5491 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5492 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5493 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5494 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005495
5496 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5497 tw32_f(MAC_MODE, tp->mac_mode);
5498 udelay(40);
5499
5500 tp->tx_mode &= ~TX_MODE_ENABLE;
5501 tw32_f(MAC_TX_MODE, tp->tx_mode);
5502
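	/* Wait up to MAX_WAIT_CNT iterations of 100us each for the
	 * transmitter to report that it has actually stopped.
	 */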
5503 for (i = 0; i < MAX_WAIT_CNT; i++) {
5504 udelay(100);
5505 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5506 break;
5507 }
5508 if (i >= MAX_WAIT_CNT) {
5509 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5510 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5511 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005512 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005513 }
5514
Michael Chane6de8ad2005-05-05 14:42:41 -07005515 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005516 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5517 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005518
5519 tw32(FTQ_RESET, 0xffffffff);
5520 tw32(FTQ_RESET, 0x00000000);
5521
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005522 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5523 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524
5525 if (tp->hw_status)
5526 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5527 if (tp->hw_stats)
5528 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5529
Linus Torvalds1da177e2005-04-16 15:20:36 -07005530 return err;
5531}
5532
5533/* tp->lock is held. */
5534static int tg3_nvram_lock(struct tg3 *tp)
5535{
5536 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5537 int i;
5538
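		/* Request the hardware arbiter only on the first acquisition;
		 * nvram_lock_cnt lets nested callers share the existing grant.
		 */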
Michael Chanec41c7d2006-01-17 02:40:55 -08005539 if (tp->nvram_lock_cnt == 0) {
5540 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5541 for (i = 0; i < 8000; i++) {
5542 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5543 break;
5544 udelay(20);
5545 }
5546 if (i == 8000) {
5547 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5548 return -ENODEV;
5549 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005550 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005551 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552 }
5553 return 0;
5554}
5555
5556/* tp->lock is held. */
5557static void tg3_nvram_unlock(struct tg3 *tp)
5558{
Michael Chanec41c7d2006-01-17 02:40:55 -08005559 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5560 if (tp->nvram_lock_cnt > 0)
5561 tp->nvram_lock_cnt--;
5562 if (tp->nvram_lock_cnt == 0)
5563 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5564 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565}
5566
5567/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005568static void tg3_enable_nvram_access(struct tg3 *tp)
5569{
5570 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5571 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5572 u32 nvaccess = tr32(NVRAM_ACCESS);
5573
5574 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5575 }
5576}
5577
5578/* tp->lock is held. */
5579static void tg3_disable_nvram_access(struct tg3 *tp)
5580{
5581 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5582 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5583 u32 nvaccess = tr32(NVRAM_ACCESS);
5584
5585 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5586 }
5587}
5588
Matt Carlson0d3031d2007-10-10 18:02:43 -07005589static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5590{
5591 int i;
5592 u32 apedata;
5593
5594 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5595 if (apedata != APE_SEG_SIG_MAGIC)
5596 return;
5597
5598 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005599 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005600 return;
5601
5602 /* Wait for up to 1 millisecond for APE to service previous event. */
5603 for (i = 0; i < 10; i++) {
5604 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5605 return;
5606
5607 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5608
5609 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5610 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5611 event | APE_EVENT_STATUS_EVENT_PENDING);
5612
5613 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5614
5615 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5616 break;
5617
5618 udelay(100);
5619 }
5620
5621 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5622 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5623}
5624
5625static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5626{
5627 u32 event;
5628 u32 apedata;
5629
5630 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5631 return;
5632
5633 switch (kind) {
5634 case RESET_KIND_INIT:
5635 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5636 APE_HOST_SEG_SIG_MAGIC);
5637 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5638 APE_HOST_SEG_LEN_MAGIC);
5639 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5640 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5641 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5642 APE_HOST_DRIVER_ID_MAGIC);
5643 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5644 APE_HOST_BEHAV_NO_PHYLOCK);
5645
5646 event = APE_EVENT_STATUS_STATE_START;
5647 break;
5648 case RESET_KIND_SHUTDOWN:
Matt Carlsonb2aee152008-11-03 16:51:11 -08005649 /* With the interface we are currently using,
5650 * APE does not track driver state. Wiping
5651 * out the HOST SEGMENT SIGNATURE forces
5652 * the APE to assume OS absent status.
5653 */
5654 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5655
Matt Carlson0d3031d2007-10-10 18:02:43 -07005656 event = APE_EVENT_STATUS_STATE_UNLOAD;
5657 break;
5658 case RESET_KIND_SUSPEND:
5659 event = APE_EVENT_STATUS_STATE_SUSPEND;
5660 break;
5661 default:
5662 return;
5663 }
5664
5665 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5666
5667 tg3_ape_send_event(tp, event);
5668}
5669
Michael Chane6af3012005-04-21 17:12:05 -07005670/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5672{
David S. Millerf49639e2006-06-09 11:58:36 -07005673 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5674 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675
5676 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5677 switch (kind) {
5678 case RESET_KIND_INIT:
5679 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5680 DRV_STATE_START);
5681 break;
5682
5683 case RESET_KIND_SHUTDOWN:
5684 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5685 DRV_STATE_UNLOAD);
5686 break;
5687
5688 case RESET_KIND_SUSPEND:
5689 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5690 DRV_STATE_SUSPEND);
5691 break;
5692
5693 default:
5694 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005695 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005696 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005697
5698 if (kind == RESET_KIND_INIT ||
5699 kind == RESET_KIND_SUSPEND)
5700 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005701}
5702
5703/* tp->lock is held. */
5704static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5705{
5706 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5707 switch (kind) {
5708 case RESET_KIND_INIT:
5709 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5710 DRV_STATE_START_DONE);
5711 break;
5712
5713 case RESET_KIND_SHUTDOWN:
5714 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5715 DRV_STATE_UNLOAD_DONE);
5716 break;
5717
5718 default:
5719 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005720 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005722
5723 if (kind == RESET_KIND_SHUTDOWN)
5724 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005725}
5726
5727/* tp->lock is held. */
5728static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5729{
5730 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5731 switch (kind) {
5732 case RESET_KIND_INIT:
5733 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5734 DRV_STATE_START);
5735 break;
5736
5737 case RESET_KIND_SHUTDOWN:
5738 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5739 DRV_STATE_UNLOAD);
5740 break;
5741
5742 case RESET_KIND_SUSPEND:
5743 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5744 DRV_STATE_SUSPEND);
5745 break;
5746
5747 default:
5748 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005749 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005750 }
5751}
5752
Michael Chan7a6f4362006-09-27 16:03:31 -07005753static int tg3_poll_fw(struct tg3 *tp)
5754{
5755 int i;
5756 u32 val;
5757
Michael Chanb5d37722006-09-27 16:06:21 -07005758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005759 /* Wait up to 20ms for init done. */
5760 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005761 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5762 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005763 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005764 }
5765 return -ENODEV;
5766 }
5767
Michael Chan7a6f4362006-09-27 16:03:31 -07005768 /* Wait for firmware initialization to complete. */
5769 for (i = 0; i < 100000; i++) {
5770 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5771 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5772 break;
5773 udelay(10);
5774 }
5775
5776 /* Chip might not be fitted with firmware. Some Sun onboard
5777 * parts are configured like that. So don't signal the timeout
5778 * of the above loop as an error, but do report the lack of
5779 * running firmware once.
5780 */
5781 if (i >= 100000 &&
5782 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5783 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5784
5785 printk(KERN_INFO PFX "%s: No firmware running.\n",
5786 tp->dev->name);
5787 }
5788
5789 return 0;
5790}
5791
Michael Chanee6a99b2007-07-18 21:49:10 -07005792/* Save PCI command register before chip reset */
5793static void tg3_save_pci_state(struct tg3 *tp)
5794{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005795 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005796}
5797
5798/* Restore PCI state after chip reset */
5799static void tg3_restore_pci_state(struct tg3 *tp)
5800{
5801 u32 val;
5802
5803 /* Re-enable indirect register accesses. */
5804 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5805 tp->misc_host_ctrl);
5806
5807 /* Set MAX PCI retry to zero. */
5808 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5809 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5810 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5811 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005812 /* Allow reads and writes to the APE register and memory space. */
5813 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5814 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5815 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005816 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5817
Matt Carlson8a6eac92007-10-21 16:17:55 -07005818 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005819
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005820 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5821 pcie_set_readrq(tp->pdev, 4096);
5822 else {
Michael Chan114342f2007-10-15 02:12:26 -07005823 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5824 tp->pci_cacheline_sz);
5825 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5826 tp->pci_lat_timer);
5827 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005828
Michael Chanee6a99b2007-07-18 21:49:10 -07005829 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005830 if (tp->pcix_cap) {
5831 u16 pcix_cmd;
5832
5833 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5834 &pcix_cmd);
5835 pcix_cmd &= ~PCI_X_CMD_ERO;
5836 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5837 pcix_cmd);
5838 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005839
5840 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005841
5842 /* Chip reset on 5780 will reset MSI enable bit,
5843		 * so we need to restore it.
5844 */
5845 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5846 u16 ctrl;
5847
5848 pci_read_config_word(tp->pdev,
5849 tp->msi_cap + PCI_MSI_FLAGS,
5850 &ctrl);
5851 pci_write_config_word(tp->pdev,
5852 tp->msi_cap + PCI_MSI_FLAGS,
5853 ctrl | PCI_MSI_FLAGS_ENABLE);
5854 val = tr32(MSGINT_MODE);
5855 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5856 }
5857 }
5858}
5859
Linus Torvalds1da177e2005-04-16 15:20:36 -07005860static void tg3_stop_fw(struct tg3 *);
5861
5862/* tp->lock is held. */
5863static int tg3_chip_reset(struct tg3 *tp)
5864{
5865 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005866 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005867 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005868
David S. Millerf49639e2006-06-09 11:58:36 -07005869 tg3_nvram_lock(tp);
5870
Matt Carlson158d7ab2008-05-29 01:37:54 -07005871 tg3_mdio_stop(tp);
5872
Matt Carlson77b483f2008-08-15 14:07:24 -07005873 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5874
David S. Millerf49639e2006-06-09 11:58:36 -07005875 /* No matching tg3_nvram_unlock() after this because
5876 * chip reset below will undo the nvram lock.
5877 */
5878 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005879
Michael Chanee6a99b2007-07-18 21:49:10 -07005880 /* GRC_MISC_CFG core clock reset will clear the memory
5881 * enable bit in PCI register 4 and the MSI enable bit
5882 * on some chips, so we save relevant registers here.
5883 */
5884 tg3_save_pci_state(tp);
5885
Michael Chand9ab5ad2006-03-20 22:27:35 -08005886 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005892 tw32(GRC_FASTBOOT_PC, 0);
5893
Linus Torvalds1da177e2005-04-16 15:20:36 -07005894 /*
5895 * We must avoid the readl() that normally takes place.
5896 * It locks machines, causes machine checks, and other
5897 * fun things. So, temporarily disable the 5701
5898 * hardware workaround, while we do the reset.
5899 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005900 write_op = tp->write32;
5901 if (write_op == tg3_write_flush_reg32)
5902 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005903
Michael Chand18edcb2007-03-24 20:57:11 -07005904 /* Prevent the irq handler from reading or writing PCI registers
5905 * during chip reset when the memory enable bit in the PCI command
5906 * register may be cleared. The chip does not generate interrupt
5907 * at this time, but the irq handler may still be called due to irq
5908 * sharing or irqpoll.
5909 */
5910 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005911 if (tp->hw_status) {
5912 tp->hw_status->status = 0;
5913 tp->hw_status->status_tag = 0;
5914 }
Michael Chand18edcb2007-03-24 20:57:11 -07005915 tp->last_tag = 0;
5916 smp_mb();
5917 synchronize_irq(tp->pdev->irq);
5918
Linus Torvalds1da177e2005-04-16 15:20:36 -07005919 /* do the reset */
5920 val = GRC_MISC_CFG_CORECLK_RESET;
5921
5922 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5923 if (tr32(0x7e2c) == 0x60) {
5924 tw32(0x7e2c, 0x20);
5925 }
5926 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5927 tw32(GRC_MISC_CFG, (1 << 29));
5928 val |= (1 << 29);
5929 }
5930 }
5931
Michael Chanb5d37722006-09-27 16:06:21 -07005932 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5933 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5934 tw32(GRC_VCPU_EXT_CTRL,
5935 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5936 }
5937
Linus Torvalds1da177e2005-04-16 15:20:36 -07005938 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5939 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5940 tw32(GRC_MISC_CFG, val);
5941
Michael Chan1ee582d2005-08-09 20:16:46 -07005942 /* restore 5701 hardware bug workaround write method */
5943 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005944
5945 /* Unfortunately, we have to delay before the PCI read back.
5946	 * Some 575X chips will not even respond to a PCI cfg access
5947 * when the reset command is given to the chip.
5948 *
5949 * How do these hardware designers expect things to work
5950 * properly if the PCI write is posted for a long period
5951 * of time? It is always necessary to have some method by
5952	 * which a register read back can occur to push out the
5953	 * write that does the reset.
5954 *
5955 * For most tg3 variants the trick below was working.
5956 * Ho hum...
5957 */
5958 udelay(120);
5959
5960 /* Flush PCI posted writes. The normal MMIO registers
5961 * are inaccessible at this time so this is the only
5962	 * way to do this reliably (actually, this is no longer
5963 * the case, see above). I tried to use indirect
5964 * register read/write but this upset some 5701 variants.
5965 */
5966 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5967
5968 udelay(120);
5969
5970 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5971 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5972 int i;
5973 u32 cfg_val;
5974
5975 /* Wait for link training to complete. */
5976 for (i = 0; i < 5000; i++)
5977 udelay(100);
5978
5979 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5980 pci_write_config_dword(tp->pdev, 0xc4,
5981 cfg_val | (1 << 15));
5982 }
5983 /* Set PCIE max payload size and clear error status. */
5984 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5985 }
5986
Michael Chanee6a99b2007-07-18 21:49:10 -07005987 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988
Michael Chand18edcb2007-03-24 20:57:11 -07005989 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5990
Michael Chanee6a99b2007-07-18 21:49:10 -07005991 val = 0;
5992 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07005993 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07005994 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005995
5996 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5997 tg3_stop_fw(tp);
5998 tw32(0x5000, 0x400);
5999 }
6000
6001 tw32(GRC_MODE, tp->grc_mode);
6002
6003 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006004 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006005
6006 tw32(0xc4, val | (1 << 15));
6007 }
6008
6009 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6011 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6012 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6013 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6014 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6015 }
6016
6017 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6018 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6019 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07006020 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6021 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6022 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07006023 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6024 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6025 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6026 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6027 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006028 } else
6029 tw32_f(MAC_MODE, 0);
6030 udelay(40);
6031
Matt Carlson158d7ab2008-05-29 01:37:54 -07006032 tg3_mdio_start(tp);
6033
Matt Carlson77b483f2008-08-15 14:07:24 -07006034 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6035
Michael Chan7a6f4362006-09-27 16:03:31 -07006036 err = tg3_poll_fw(tp);
6037 if (err)
6038 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006039
6040 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6041 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01006042 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006043
6044 tw32(0x7c00, val | (1 << 25));
6045 }
6046
6047 /* Reprobe ASF enable state. */
6048 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6049 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6050 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6051 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6052 u32 nic_cfg;
6053
6054 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6055 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6056 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07006057 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07006058 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006059 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6060 }
6061 }
6062
6063 return 0;
6064}
6065
6066/* tp->lock is held. */
6067static void tg3_stop_fw(struct tg3 *tp)
6068{
Matt Carlson0d3031d2007-10-10 18:02:43 -07006069 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6070 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07006071 /* Wait for RX cpu to ACK the previous event. */
6072 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006073
6074 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07006075
6076 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006077
Matt Carlson7c5026a2008-05-02 16:49:29 -07006078 /* Wait for RX cpu to ACK this event. */
6079 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080 }
6081}
6082
6083/* tp->lock is held. */
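/* Quiesce the device for a reset: stop the management firmware, write the
 * pre-reset signature for 'kind', abort MAC/DMA activity and run the core
 * chip reset.  'silent' is forwarded to tg3_abort_hw().
 */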
Michael Chan944d9802005-05-29 14:57:48 -07006084static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006085{
6086 int err;
6087
6088 tg3_stop_fw(tp);
6089
Michael Chan944d9802005-05-29 14:57:48 -07006090 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006091
David S. Millerb3b7d6b2005-05-05 14:40:20 -07006092 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006093 err = tg3_chip_reset(tp);
6094
Michael Chan944d9802005-05-29 14:57:48 -07006095 tg3_write_sig_legacy(tp, kind);
6096 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006097
6098 if (err)
6099 return err;
6100
6101 return 0;
6102}
6103
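/* Layout of the 5701 A0 workaround firmware image below: load addresses
 * and lengths of the MIPS text, rodata and data sections, plus the sizes
 * of the zero-filled sbss/bss areas that follow them in CPU memory.
 */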
6104#define TG3_FW_RELEASE_MAJOR 0x0
#define TG3_FW_RELEASE_MINOR	0x0
6106#define TG3_FW_RELEASE_FIX 0x0
6107#define TG3_FW_START_ADDR 0x08000000
6108#define TG3_FW_TEXT_ADDR 0x08000000
6109#define TG3_FW_TEXT_LEN 0x9c0
6110#define TG3_FW_RODATA_ADDR 0x080009c0
6111#define TG3_FW_RODATA_LEN 0x60
6112#define TG3_FW_DATA_ADDR 0x08000a40
6113#define TG3_FW_DATA_LEN 0x20
6114#define TG3_FW_SBSS_ADDR 0x08000a60
6115#define TG3_FW_SBSS_LEN 0xc
6116#define TG3_FW_BSS_ADDR 0x08000a70
6117#define TG3_FW_BSS_LEN 0x10
6118
Andreas Mohr50da8592006-08-14 23:54:30 -07006119static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006120 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6121 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6122 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6123 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6124 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6125 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6126 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6127 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6128 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6129 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6130 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6131 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6132 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6133 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6134 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6135 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6136 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6137 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6138 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6139 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6140 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6141 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6142 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6144 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6145 0, 0, 0, 0, 0, 0,
6146 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6147 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6148 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6149 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6150 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6151 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6152 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6153 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6154 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6155 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6156 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6157 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6158 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6159 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6160 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6161 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6162 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6163 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6164 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6165 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6166 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6167 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6168 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6169 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6170 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6171 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6172 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6173 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6174 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6175 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6176 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6177 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6178 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6179 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6180 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6181 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6182 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6183 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6184 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6185 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6186 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6187 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6188 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6189 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6190 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6191 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6192 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6193 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6194 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6195 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6196 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6197 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6198 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6199 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6200 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6201 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6202 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6203 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6204 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6205 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6206 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6207 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6208 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6209 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6210 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6211};
6212
Andreas Mohr50da8592006-08-14 23:54:30 -07006213static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006214 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6215 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6216 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6217 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6218 0x00000000
6219};
6220
6221#if 0 /* All zeros, don't eat up space with it. */
6222u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6223 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6224 0x00000000, 0x00000000, 0x00000000, 0x00000000
6225};
6226#endif
6227
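/* On-chip scratch memory windows into which downloaded RX/TX CPU firmware
 * images are written before the CPUs are started.
 */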
6228#define RX_CPU_SCRATCH_BASE 0x30000
6229#define RX_CPU_SCRATCH_SIZE 0x04000
6230#define TX_CPU_SCRATCH_BASE 0x34000
6231#define TX_CPU_SCRATCH_SIZE 0x04000
6232
6233/* tp->lock is held. */
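/* Halt the on-chip RX or TX MIPS CPU selected by 'offset'.  The 5906 has
 * no such CPUs and is halted through GRC_VCPU_EXT_CTRL instead.  Returns
 * -ENODEV if the CPU never reports the HALT state.
 */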
6234static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6235{
6236 int i;
6237
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006238 BUG_ON(offset == TX_CPU_BASE &&
6239 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006240
Michael Chanb5d37722006-09-27 16:06:21 -07006241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6242 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6243
6244 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6245 return 0;
6246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006247 if (offset == RX_CPU_BASE) {
6248 for (i = 0; i < 10000; i++) {
6249 tw32(offset + CPU_STATE, 0xffffffff);
6250 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6251 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6252 break;
6253 }
6254
6255 tw32(offset + CPU_STATE, 0xffffffff);
6256 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6257 udelay(10);
6258 } else {
6259 for (i = 0; i < 10000; i++) {
6260 tw32(offset + CPU_STATE, 0xffffffff);
6261 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6262 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6263 break;
6264 }
6265 }
6266
6267 if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
		       "%s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
6272 return -ENODEV;
6273 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006274
6275 /* Clear firmware's nvram arbitration. */
6276 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6277 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006278 return 0;
6279}
6280
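/* Describes one downloadable firmware image: base address, length and
 * contents of each section.  A NULL data pointer means the section is
 * written as zeros.
 */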
6281struct fw_info {
6282 unsigned int text_base;
6283 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006284 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006285 unsigned int rodata_base;
6286 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006287 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006288 unsigned int data_base;
6289 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006290 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006291};
6292
6293/* tp->lock is held. */
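/* Copy a firmware image into the scratch memory of the CPU at 'cpu_base'.
 * The NVRAM lock is taken around the halt because boot code may still be
 * running; the scratch area is then cleared and each section is written
 * word by word at its (base & 0xffff) offset.  The CPU is left halted for
 * the caller to start.
 */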
6294static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6295 int cpu_scratch_size, struct fw_info *info)
6296{
Michael Chanec41c7d2006-01-17 02:40:55 -08006297 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006298 void (*write_op)(struct tg3 *, u32, u32);
6299
6300 if (cpu_base == TX_CPU_BASE &&
6301 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705 or newer.\n",
		       tp->dev->name);
6305 return -EINVAL;
6306 }
6307
6308 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6309 write_op = tg3_write_mem;
6310 else
6311 write_op = tg3_write_indirect_reg32;
6312
Michael Chan1b628152005-05-29 14:59:49 -07006313 /* It is possible that bootcode is still loading at this point.
6314 * Get the nvram lock first before halting the cpu.
6315 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006316 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006317 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006318 if (!lock_err)
6319 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006320 if (err)
6321 goto out;
6322
6323 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6324 write_op(tp, cpu_scratch_base + i, 0);
6325 tw32(cpu_base + CPU_STATE, 0xffffffff);
6326 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6327 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6328 write_op(tp, (cpu_scratch_base +
6329 (info->text_base & 0xffff) +
6330 (i * sizeof(u32))),
6331 (info->text_data ?
6332 info->text_data[i] : 0));
6333 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6334 write_op(tp, (cpu_scratch_base +
6335 (info->rodata_base & 0xffff) +
6336 (i * sizeof(u32))),
6337 (info->rodata_data ?
6338 info->rodata_data[i] : 0));
6339 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6340 write_op(tp, (cpu_scratch_base +
6341 (info->data_base & 0xffff) +
6342 (i * sizeof(u32))),
6343 (info->data_data ?
6344 info->data_data[i] : 0));
6345
6346 err = 0;
6347
6348out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006349 return err;
6350}
6351
6352/* tp->lock is held. */
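/* Load the workaround firmware into both the RX and TX CPU scratch areas,
 * then start only the RX CPU and poll CPU_PC until it reaches the firmware
 * entry point at TG3_FW_TEXT_ADDR.
 */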
6353static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6354{
6355 struct fw_info info;
6356 int err, i;
6357
6358 info.text_base = TG3_FW_TEXT_ADDR;
6359 info.text_len = TG3_FW_TEXT_LEN;
6360 info.text_data = &tg3FwText[0];
6361 info.rodata_base = TG3_FW_RODATA_ADDR;
6362 info.rodata_len = TG3_FW_RODATA_LEN;
6363 info.rodata_data = &tg3FwRodata[0];
6364 info.data_base = TG3_FW_DATA_ADDR;
6365 info.data_len = TG3_FW_DATA_LEN;
6366 info.data_data = NULL;
6367
6368 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6369 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6370 &info);
6371 if (err)
6372 return err;
6373
6374 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6375 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6376 &info);
6377 if (err)
6378 return err;
6379
	/* Now start up only the RX CPU. */
6381 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6382 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6383
6384 for (i = 0; i < 5; i++) {
6385 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6386 break;
6387 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6388 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6389 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6390 udelay(1000);
6391 }
6392 if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails to set RX CPU PC "
		       "for %s: is %08x, should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
6397 return -ENODEV;
6398 }
6399 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6400 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6401
6402 return 0;
6403}
6404
Linus Torvalds1da177e2005-04-16 15:20:36 -07006405
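/* Firmware used to implement TSO on chips without hardware TSO support.
 * This image runs on the TX CPU; the 5705 needs the separate variant
 * further below, which runs on the RX CPU out of the mbuf pool SRAM.
 */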
6406#define TG3_TSO_FW_RELEASE_MAJOR 0x1
#define TG3_TSO_FW_RELEASE_MINOR	0x6
6408#define TG3_TSO_FW_RELEASE_FIX 0x0
6409#define TG3_TSO_FW_START_ADDR 0x08000000
6410#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6411#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6412#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6413#define TG3_TSO_FW_RODATA_LEN 0x60
6414#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6415#define TG3_TSO_FW_DATA_LEN 0x30
6416#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6417#define TG3_TSO_FW_SBSS_LEN 0x2c
6418#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6419#define TG3_TSO_FW_BSS_LEN 0x894
6420
Andreas Mohr50da8592006-08-14 23:54:30 -07006421static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006422 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6423 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6424 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6425 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6426 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6427 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6428 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6429 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6430 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6431 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6432 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6433 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6434 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6435 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6436 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6437 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6438 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6439 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6440 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6441 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6442 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6443 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6444 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6445 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6446 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6447 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6448 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6449 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6450 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6451 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6452 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6453 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6454 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6455 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6456 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6457 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6458 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6459 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6460 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6461 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6462 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6463 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6464 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6465 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6466 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6467 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6468 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6469 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6470 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6471 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6472 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6473 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6474 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6475 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6476 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6477 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6478 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6479 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6480 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6481 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6482 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6483 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6484 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6485 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6486 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6487 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6488 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6489 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6490 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6491 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6492 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6493 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6494 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6495 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6496 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6497 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6498 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6499 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6500 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6501 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6502 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6503 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6504 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6505 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6506 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6507 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6508 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6509 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6510 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6511 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6512 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6513 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6514 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6515 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6516 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6517 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6518 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6519 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6520 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6521 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6522 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6523 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6524 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6525 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6526 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6527 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6528 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6529 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6530 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6531 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6532 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6533 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6534 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6535 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6536 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6537 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6538 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6539 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6540 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6541 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6542 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6543 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6544 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6545 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6546 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6547 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6548 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6549 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6550 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6551 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6552 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6553 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6554 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6555 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6556 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6557 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6558 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6559 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6560 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6561 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6562 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6563 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6564 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6565 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6566 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6567 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6568 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6569 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6570 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6571 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6572 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6573 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6574 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6575 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6576 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6577 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6578 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6579 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6580 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6581 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6582 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6583 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6584 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6585 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6586 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6587 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6588 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6589 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6590 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6591 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6592 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6593 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6594 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6595 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6596 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6597 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6598 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6599 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6600 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6601 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6602 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6603 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6604 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6605 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6606 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6607 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6608 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6609 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6610 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6611 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6612 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6613 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6614 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6615 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6616 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6617 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6618 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6619 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6620 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6621 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6622 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6623 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6624 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6625 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6626 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6627 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6628 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6629 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6630 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6631 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6632 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6633 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6634 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6635 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6636 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6637 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6638 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6639 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6640 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6641 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6642 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6643 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6644 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6645 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6646 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6647 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6648 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6649 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6650 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6651 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6652 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6653 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6654 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6655 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6656 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6657 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6658 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6659 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6660 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6661 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6662 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6663 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6664 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6665 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6666 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6667 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6668 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6669 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6670 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6671 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6672 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6673 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6674 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6675 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6676 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6677 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6678 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6679 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6680 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6681 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6682 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6683 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6684 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6685 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6686 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6687 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6688 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6689 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6690 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6691 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6692 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6693 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6694 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6695 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6696 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6697 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6698 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6699 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6700 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6701 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6702 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6703 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6704 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6705 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6706};
6707
Andreas Mohr50da8592006-08-14 23:54:30 -07006708static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006709 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6710 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6711 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6712 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6713 0x00000000,
6714};
6715
Andreas Mohr50da8592006-08-14 23:54:30 -07006716static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006717 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6718 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6719 0x00000000,
6720};
6721
6722/* 5705 needs a special version of the TSO firmware. */
6723#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
#define TG3_TSO5_FW_RELEASE_MINOR	0x2
6725#define TG3_TSO5_FW_RELEASE_FIX 0x0
6726#define TG3_TSO5_FW_START_ADDR 0x00010000
6727#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6728#define TG3_TSO5_FW_TEXT_LEN 0xe90
6729#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6730#define TG3_TSO5_FW_RODATA_LEN 0x50
6731#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6732#define TG3_TSO5_FW_DATA_LEN 0x20
6733#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6734#define TG3_TSO5_FW_SBSS_LEN 0x28
6735#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6736#define TG3_TSO5_FW_BSS_LEN 0x88
6737
Andreas Mohr50da8592006-08-14 23:54:30 -07006738static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006739 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6740 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6741 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6742 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6743 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6744 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6745 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6746 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6747 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6748 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6749 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6750 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6751 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6752 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6753 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6754 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6755 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6756 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6757 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6758 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6759 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6760 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6761 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6762 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6763 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6764 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6765 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6766 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6767 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6768 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6769 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6770 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6771 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6772 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6773 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6774 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6775 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6776 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6777 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6778 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6779 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6780 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6781 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6782 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6783 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6784 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6785 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6786 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6787 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6788 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6789 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6790 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6791 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6792 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6793 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6794 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6795 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6796 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6797 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6798 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6799 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6800 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6801 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6802 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6803 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6804 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6805 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6806 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6807 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6808 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6809 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6810 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6811 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6812 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6813 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6814 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6815 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6816 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6817 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6818 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6819 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6820 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6821 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6822 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6823 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6824 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6825 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6826 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6827 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6828 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6829 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6830 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6831 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6832 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6833 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6834 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6835 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6836 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6837 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6838 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6839 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6840 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6841 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6842 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6843 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6844 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6845 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6846 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6847 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6848 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6849 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6850 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6851 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6852 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6853 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6854 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6855 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6856 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6857 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6858 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6859 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6860 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6861 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6862 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6863 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6864 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6865 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6866 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6867 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6868 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6869 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6870 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6871 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6872 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6873 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6874 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6875 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6876 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6877 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6878 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6879 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6880 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6881 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6882 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6883 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6884 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6885 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6886 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6887 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6888 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6889 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6890 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6891 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6892 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6893 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6894 0x00000000, 0x00000000, 0x00000000,
6895};
6896
Andreas Mohr50da8592006-08-14 23:54:30 -07006897static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006898 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6899 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6900 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6901 0x00000000, 0x00000000, 0x00000000,
6902};
6903
Andreas Mohr50da8592006-08-14 23:54:30 -07006904static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6906 0x00000000, 0x00000000, 0x00000000,
6907};
6908
6909/* tp->lock is held. */
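/* Pick the TSO firmware variant for this chip (5705 vs. other 570x), load
 * it into the corresponding CPU's scratch memory, then start that CPU and
 * verify that CPU_PC lands on the firmware entry point.  Chips with
 * hardware TSO return immediately.
 */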
6910static int tg3_load_tso_firmware(struct tg3 *tp)
6911{
6912 struct fw_info info;
6913 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6914 int err, i;
6915
6916 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6917 return 0;
6918
6919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6920 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6921 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6922 info.text_data = &tg3Tso5FwText[0];
6923 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6924 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6925 info.rodata_data = &tg3Tso5FwRodata[0];
6926 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6927 info.data_len = TG3_TSO5_FW_DATA_LEN;
6928 info.data_data = &tg3Tso5FwData[0];
6929 cpu_base = RX_CPU_BASE;
6930 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6931 cpu_scratch_size = (info.text_len +
6932 info.rodata_len +
6933 info.data_len +
6934 TG3_TSO5_FW_SBSS_LEN +
6935 TG3_TSO5_FW_BSS_LEN);
6936 } else {
6937 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6938 info.text_len = TG3_TSO_FW_TEXT_LEN;
6939 info.text_data = &tg3TsoFwText[0];
6940 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6941 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6942 info.rodata_data = &tg3TsoFwRodata[0];
6943 info.data_base = TG3_TSO_FW_DATA_ADDR;
6944 info.data_len = TG3_TSO_FW_DATA_LEN;
6945 info.data_data = &tg3TsoFwData[0];
6946 cpu_base = TX_CPU_BASE;
6947 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6948 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6949 }
6950
6951 err = tg3_load_firmware_cpu(tp, cpu_base,
6952 cpu_scratch_base, cpu_scratch_size,
6953 &info);
6954 if (err)
6955 return err;
6956
	/* Now start up the CPU. */
6958 tw32(cpu_base + CPU_STATE, 0xffffffff);
6959 tw32_f(cpu_base + CPU_PC, info.text_base);
6960
6961 for (i = 0; i < 5; i++) {
6962 if (tr32(cpu_base + CPU_PC) == info.text_base)
6963 break;
6964 tw32(cpu_base + CPU_STATE, 0xffffffff);
6965 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6966 tw32_f(cpu_base + CPU_PC, info.text_base);
6967 udelay(1000);
6968 }
6969 if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set CPU PC "
		       "for %s: is %08x, should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
6974 return -ENODEV;
6975 }
6976 tw32(cpu_base + CPU_STATE, 0xffffffff);
6977 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6978 return 0;
6979}
6980
Linus Torvalds1da177e2005-04-16 15:20:36 -07006981
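/* Driver hook for changing the interface MAC address.  The new address is
 * copied into the net_device and, if the interface is up, programmed into
 * the MAC address registers; MAC address slot 1 is skipped when the ASF
 * firmware appears to be using it.
 */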
Linus Torvalds1da177e2005-04-16 15:20:36 -07006982static int tg3_set_mac_addr(struct net_device *dev, void *p)
6983{
6984 struct tg3 *tp = netdev_priv(dev);
6985 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07006986 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006987
Michael Chanf9804dd2005-09-27 12:13:10 -07006988 if (!is_valid_ether_addr(addr->sa_data))
6989 return -EINVAL;
6990
Linus Torvalds1da177e2005-04-16 15:20:36 -07006991 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6992
Michael Chane75f7c92006-03-20 21:33:26 -08006993 if (!netif_running(dev))
6994 return 0;
6995
Michael Chan58712ef2006-04-29 18:58:01 -07006996 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006997 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07006998
Michael Chan986e0ae2007-05-05 12:10:20 -07006999 addr0_high = tr32(MAC_ADDR_0_HIGH);
7000 addr0_low = tr32(MAC_ADDR_0_LOW);
7001 addr1_high = tr32(MAC_ADDR_1_HIGH);
7002 addr1_low = tr32(MAC_ADDR_1_LOW);
7003
7004 /* Skip MAC addr 1 if ASF is using it. */
7005 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7006 !(addr1_high == 0 && addr1_low == 0))
7007 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07007008 }
Michael Chan986e0ae2007-05-05 12:10:20 -07007009 spin_lock_bh(&tp->lock);
7010 __tg3_set_mac_addr(tp, skip_mac_1);
7011 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007012
Michael Chanb9ec6c12006-07-25 16:37:27 -07007013 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007014}
7015
7016/* tp->lock is held. */
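/* Program one ring's buffer descriptor control block in NIC memory: the
 * 64-bit host DMA address, the maxlen/flags word and, on pre-5705 parts,
 * the NIC-local ring address.
 */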
7017static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7018 dma_addr_t mapping, u32 maxlen_flags,
7019 u32 nic_addr)
7020{
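	/* A TG3_BDINFO block in NIC SRAM holds the 64-bit host DMA address of a ring
	 * (high/low words), its maxlen/flags word, and, on pre-5705 chips, the NIC-side
	 * SRAM address of the descriptors.
	 */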
7021 tg3_write_mem(tp,
7022 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7023 ((u64) mapping >> 32));
7024 tg3_write_mem(tp,
7025 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7026 ((u64) mapping & 0xffffffff));
7027 tg3_write_mem(tp,
7028 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7029 maxlen_flags);
7030
7031 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7032 tg3_write_mem(tp,
7033 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7034 nic_addr);
7035}
7036
7037static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07007038static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07007039{
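	/* Program the host coalescing engine from the ethtool coalesce parameters;
	 * the per-interrupt tick and statistics-block tick registers are only
	 * written on pre-5705-class parts.
	 */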
7040 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7041 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7042 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7043 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7044 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7045 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7046 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7047 }
7048 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7049 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7050 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7051 u32 val = ec->stats_block_coalesce_usecs;
7052
7053 if (!netif_carrier_ok(tp->dev))
7054 val = 0;
7055
7056 tw32(HOSTCC_STAT_COAL_TICKS, val);
7057 }
7058}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007059
7060/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007061static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007062{
7063 u32 val, rdmac_mode;
7064 int i, err, limit;
7065
7066 tg3_disable_ints(tp);
7067
7068 tg3_stop_fw(tp);
7069
7070 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7071
7072 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007073 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007074 }
7075
Matt Carlsondd477002008-05-25 23:45:58 -07007076 if (reset_phy &&
7077 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007078 tg3_phy_reset(tp);
7079
Linus Torvalds1da177e2005-04-16 15:20:36 -07007080 err = tg3_chip_reset(tp);
7081 if (err)
7082 return err;
7083
7084 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7085
Matt Carlsonbcb37f62008-11-03 16:52:09 -08007086 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
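		/* 5784 AX workaround: take the CPMU out of link-aware/link-idle mode and
		 * select the 6.25 MHz MAC clock in the 10MB, link-aware, and host-access
		 * clock fields.
		 */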
Matt Carlsond30cdd22007-10-07 23:28:35 -07007087 val = tr32(TG3_CPMU_CTRL);
7088 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7089 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007090
7091 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7092 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7093 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7094 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7095
7096 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7097 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7098 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7099 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7100
7101 val = tr32(TG3_CPMU_HST_ACC);
7102 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7103 val |= CPMU_HST_ACC_MACCLK_6_25;
7104 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007105 }
7106
Linus Torvalds1da177e2005-04-16 15:20:36 -07007107 /* This works around an issue with Athlon chipsets on
7108 * B3 tigon3 silicon. This bit has no effect on any
7109 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007110 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007111 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007112 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7113 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7114 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7115 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7116 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117
7118 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7119 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7120 val = tr32(TG3PCI_PCISTATE);
7121 val |= PCISTATE_RETRY_SAME_DMA;
7122 tw32(TG3PCI_PCISTATE, val);
7123 }
7124
Matt Carlson0d3031d2007-10-10 18:02:43 -07007125 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7126 /* Allow reads and writes to the
7127 * APE register and memory space.
7128 */
7129 val = tr32(TG3PCI_PCISTATE);
7130 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7131 PCISTATE_ALLOW_APE_SHMEM_WR;
7132 tw32(TG3PCI_PCISTATE, val);
7133 }
7134
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7136 /* Enable some hw fixes. */
7137 val = tr32(TG3PCI_MSI_DATA);
7138 val |= (1 << 26) | (1 << 28) | (1 << 29);
7139 tw32(TG3PCI_MSI_DATA, val);
7140 }
7141
7142 /* Descriptor ring init may make accesses to the
7143	 * NIC SRAM area to set up the TX descriptors, so we
7144 * can only do this after the hardware has been
7145 * successfully reset.
7146 */
Michael Chan32d8c572006-07-25 16:38:29 -07007147 err = tg3_init_rings(tp);
7148 if (err)
7149 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150
Matt Carlson9936bcf2007-10-10 18:03:07 -07007151 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlson57e69832008-05-25 23:48:31 -07007152 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7153 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007154 /* This value is determined during the probe time DMA
7155 * engine test, tg3_test_dma.
7156 */
7157 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007159
7160 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7161 GRC_MODE_4X_NIC_SEND_RINGS |
7162 GRC_MODE_NO_TX_PHDR_CSUM |
7163 GRC_MODE_NO_RX_PHDR_CSUM);
7164 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007165
7166 /* Pseudo-header checksum is done by hardware logic and not
7167	 * the offload processors, so make the chip do the pseudo-
7168 * header checksums on receive. For transmit it is more
7169 * convenient to do the pseudo-header checksum in software
7170 * as Linux does that on transmit for us in all cases.
7171 */
7172 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007173
7174 tw32(GRC_MODE,
7175 tp->grc_mode |
7176 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7177
7178	/* Set up the timer prescaler register. The clock is always 66 MHz. */
7179 val = tr32(GRC_MISC_CFG);
7180 val &= ~0xff;
7181 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7182 tw32(GRC_MISC_CFG, val);
7183
7184 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007185 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007186 /* Do nothing. */
7187 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7188 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7190 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7191 else
7192 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7193 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7194 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7195 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7197 int fw_len;
7198
7199 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7200 TG3_TSO5_FW_RODATA_LEN +
7201 TG3_TSO5_FW_DATA_LEN +
7202 TG3_TSO5_FW_SBSS_LEN +
7203 TG3_TSO5_FW_BSS_LEN);
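		/* Round the firmware footprint up to a 128-byte boundary before carving
		 * it out of the head of the 5705 MBUF pool.
		 */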
7204 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7205 tw32(BUFMGR_MB_POOL_ADDR,
7206 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7207 tw32(BUFMGR_MB_POOL_SIZE,
7208 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007210
Michael Chan0f893dc2005-07-25 12:30:38 -07007211 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7213 tp->bufmgr_config.mbuf_read_dma_low_water);
7214 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7215 tp->bufmgr_config.mbuf_mac_rx_low_water);
7216 tw32(BUFMGR_MB_HIGH_WATER,
7217 tp->bufmgr_config.mbuf_high_water);
7218 } else {
7219 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7220 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7221 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7222 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7223 tw32(BUFMGR_MB_HIGH_WATER,
7224 tp->bufmgr_config.mbuf_high_water_jumbo);
7225 }
7226 tw32(BUFMGR_DMA_LOW_WATER,
7227 tp->bufmgr_config.dma_low_water);
7228 tw32(BUFMGR_DMA_HIGH_WATER,
7229 tp->bufmgr_config.dma_high_water);
7230
7231 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7232 for (i = 0; i < 2000; i++) {
7233 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7234 break;
7235 udelay(10);
7236 }
7237 if (i >= 2000) {
7238 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7239 tp->dev->name);
7240 return -ENODEV;
7241 }
7242
7243 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007244 val = tp->rx_pending / 8;
7245 if (val == 0)
7246 val = 1;
7247 else if (val > tp->rx_std_max_post)
7248 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007249 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7250 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7251 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7252
7253 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7254 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7255 }
Michael Chanf92905d2006-06-29 20:14:29 -07007256
7257 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007258
7259 /* Initialize TG3_BDINFO's at:
7260 * RCVDBDI_STD_BD: standard eth size rx ring
7261 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7262 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7263 *
7264 * like so:
7265 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7266 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7267 * ring attribute flags
7268 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7269 *
7270 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7271 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7272 *
7273 * The size of each ring is fixed in the firmware, but the location is
7274 * configurable.
7275 */
7276 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7277 ((u64) tp->rx_std_mapping >> 32));
7278 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7279 ((u64) tp->rx_std_mapping & 0xffffffff));
7280 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7281 NIC_SRAM_RX_BUFFER_DESC);
7282
7283 /* Don't even try to program the JUMBO/MINI buffer descriptor
7284 * configs on 5705.
7285 */
7286 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7287 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7288 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7289 } else {
7290 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7291 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7292
7293 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7294 BDINFO_FLAGS_DISABLED);
7295
7296 /* Setup replenish threshold. */
7297 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7298
Michael Chan0f893dc2005-07-25 12:30:38 -07007299 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7301 ((u64) tp->rx_jumbo_mapping >> 32));
7302 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7303 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7304 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7305 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7306 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7307 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7308 } else {
7309 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7310 BDINFO_FLAGS_DISABLED);
7311 }
7312
7313 }
7314
7315 /* There is only one send ring on 5705/5750, no need to explicitly
7316 * disable the others.
7317 */
7318 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7319 /* Clear out send RCB ring in SRAM. */
7320 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7321 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7322 BDINFO_FLAGS_DISABLED);
7323 }
7324
7325 tp->tx_prod = 0;
7326 tp->tx_cons = 0;
7327 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7328 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7329
7330 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7331 tp->tx_desc_mapping,
7332 (TG3_TX_RING_SIZE <<
7333 BDINFO_FLAGS_MAXLEN_SHIFT),
7334 NIC_SRAM_TX_BUFFER_DESC);
7335
7336 /* There is only one receive return ring on 5705/5750, no need
7337 * to explicitly disable the others.
7338 */
7339 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7340 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7341 i += TG3_BDINFO_SIZE) {
7342 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7343 BDINFO_FLAGS_DISABLED);
7344 }
7345 }
7346
7347 tp->rx_rcb_ptr = 0;
7348 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7349
7350 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7351 tp->rx_rcb_mapping,
7352 (TG3_RX_RCB_RING_SIZE(tp) <<
7353 BDINFO_FLAGS_MAXLEN_SHIFT),
7354 0);
7355
7356 tp->rx_std_ptr = tp->rx_pending;
7357 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7358 tp->rx_std_ptr);
7359
Michael Chan0f893dc2005-07-25 12:30:38 -07007360 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007361 tp->rx_jumbo_pending : 0;
7362 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7363 tp->rx_jumbo_ptr);
7364
7365 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007366 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007367
7368 /* MTU + ethernet header + FCS + optional VLAN tag */
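	/* The extra 8 bytes cover the 4-byte FCS plus a 4-byte 802.1Q tag. */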
7369 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7370
7371 /* The slot time is changed by tg3_setup_phy if we
7372 * run at gigabit with half duplex.
7373 */
7374 tw32(MAC_TX_LENGTHS,
7375 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7376 (6 << TX_LENGTHS_IPG_SHIFT) |
7377 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7378
7379 /* Receive rules. */
7380 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7381 tw32(RCVLPC_CONFIG, 0x0181);
7382
7383	/* Calculate the RDMAC_MODE setting early; we need it to determine
7384 * the RCVLPC_STATE_ENABLE mask.
7385 */
7386 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7387 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7388 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7389 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7390 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007391
Matt Carlson57e69832008-05-25 23:48:31 -07007392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007394 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7395 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7396 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7397
Michael Chan85e94ce2005-04-21 17:05:28 -07007398 /* If statement applies to 5705 and 5750 PCI devices only */
7399 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7400 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7401 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007402 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007404 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7405 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7406 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7407 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7408 }
7409 }
7410
Michael Chan85e94ce2005-04-21 17:05:28 -07007411 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7412 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7413
Linus Torvalds1da177e2005-04-16 15:20:36 -07007414 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7415 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416
7417 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007418 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7419 val = tr32(RCVLPC_STATS_ENABLE);
7420 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7421 tw32(RCVLPC_STATS_ENABLE, val);
7422 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7423 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007424 val = tr32(RCVLPC_STATS_ENABLE);
7425 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7426 tw32(RCVLPC_STATS_ENABLE, val);
7427 } else {
7428 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7429 }
7430 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7431 tw32(SNDDATAI_STATSENAB, 0xffffff);
7432 tw32(SNDDATAI_STATSCTRL,
7433 (SNDDATAI_SCTRL_ENABLE |
7434 SNDDATAI_SCTRL_FASTUPD));
7435
7436 /* Setup host coalescing engine. */
7437 tw32(HOSTCC_MODE, 0);
7438 for (i = 0; i < 2000; i++) {
7439 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7440 break;
7441 udelay(10);
7442 }
7443
Michael Chand244c892005-07-05 14:42:33 -07007444 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007445
7446 /* set status block DMA address */
7447 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7448 ((u64) tp->status_mapping >> 32));
7449 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7450 ((u64) tp->status_mapping & 0xffffffff));
7451
7452 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7453 /* Status/statistics block address. See tg3_timer,
7454 * the tg3_periodic_fetch_stats call there, and
7455 * tg3_get_stats to see how this works for 5705/5750 chips.
7456 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7458 ((u64) tp->stats_mapping >> 32));
7459 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7460 ((u64) tp->stats_mapping & 0xffffffff));
7461 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7462 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7463 }
7464
7465 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7466
7467 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7468 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7469 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7470 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7471
7472 /* Clear statistics/status block in chip, and status block in ram. */
7473 for (i = NIC_SRAM_STATS_BLK;
7474 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7475 i += sizeof(u32)) {
7476 tg3_write_mem(tp, i, 0);
7477 udelay(40);
7478 }
7479 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7480
Michael Chanc94e3942005-09-27 12:12:42 -07007481 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7482 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7483 /* reset to prevent losing 1st rx packet intermittently */
7484 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7485 udelay(10);
7486 }
7487
Matt Carlson3bda1252008-08-15 14:08:22 -07007488 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7489 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7490 else
7491 tp->mac_mode = 0;
7492 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007493 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007494 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7495 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7496 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7497 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007498 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7499 udelay(40);
7500
Michael Chan314fba32005-04-21 17:07:04 -07007501 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007502 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007503 * register to preserve the GPIO settings for LOMs. The GPIOs,
7504 * whether used as inputs or outputs, are set by boot code after
7505 * reset.
7506 */
Michael Chan9d26e212006-12-07 00:21:14 -08007507 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007508 u32 gpio_mask;
7509
Michael Chan9d26e212006-12-07 00:21:14 -08007510 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7511 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7512 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007513
7514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7515 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7516 GRC_LCLCTRL_GPIO_OUTPUT3;
7517
Michael Chanaf36e6b2006-03-23 01:28:06 -08007518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7519 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7520
Gary Zambranoaaf84462007-05-05 11:51:45 -07007521 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007522 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7523
7524 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007525 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7526 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7527 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007528 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007529 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7530 udelay(100);
7531
Michael Chan09ee9292005-08-09 20:17:00 -07007532 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007533 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007534
7535 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7536 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7537 udelay(40);
7538 }
7539
7540 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7541 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7542 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7543 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7544 WDMAC_MODE_LNGREAD_ENAB);
7545
Michael Chan85e94ce2005-04-21 17:05:28 -07007546 /* If statement applies to 5705 and 5750 PCI devices only */
7547 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7548 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7549 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007550		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7551 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7552 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7553 /* nothing */
7554 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7555 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7556 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7557 val |= WDMAC_MODE_RX_ACCEL;
7558 }
7559 }
7560
Michael Chand9ab5ad2006-03-20 22:27:35 -08007561 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007562 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007563 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007564 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007565 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7566 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007567 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007568
Linus Torvalds1da177e2005-04-16 15:20:36 -07007569 tw32_f(WDMAC_MODE, val);
7570 udelay(40);
7571
Matt Carlson9974a352007-10-07 23:27:28 -07007572 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7573 u16 pcix_cmd;
7574
7575 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7576 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007578 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7579 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007580 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007581 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7582 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007583 }
Matt Carlson9974a352007-10-07 23:27:28 -07007584 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7585 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007586 }
7587
7588 tw32_f(RDMAC_MODE, rdmac_mode);
7589 udelay(40);
7590
7591 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7592 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7593 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007594
7595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7596 tw32(SNDDATAC_MODE,
7597 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7598 else
7599 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7600
Linus Torvalds1da177e2005-04-16 15:20:36 -07007601 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7602 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7603 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7604 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007605 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7606 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007607 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7608 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7609
7610 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7611 err = tg3_load_5701_a0_firmware_fix(tp);
7612 if (err)
7613 return err;
7614 }
7615
Linus Torvalds1da177e2005-04-16 15:20:36 -07007616 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7617 err = tg3_load_tso_firmware(tp);
7618 if (err)
7619 return err;
7620 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007621
7622 tp->tx_mode = TX_MODE_ENABLE;
7623 tw32_f(MAC_TX_MODE, tp->tx_mode);
7624 udelay(100);
7625
7626 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007631 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7632
Linus Torvalds1da177e2005-04-16 15:20:36 -07007633 tw32_f(MAC_RX_MODE, tp->rx_mode);
7634 udelay(10);
7635
Linus Torvalds1da177e2005-04-16 15:20:36 -07007636 tw32(MAC_LED_CTRL, tp->led_ctrl);
7637
7638 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007639 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007640 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7641 udelay(10);
7642 }
7643 tw32_f(MAC_RX_MODE, tp->rx_mode);
7644 udelay(10);
7645
7646 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7647 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7648 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7649 /* Set drive transmission level to 1.2V */
7650 /* only if the signal pre-emphasis bit is not set */
7651 val = tr32(MAC_SERDES_CFG);
7652 val &= 0xfffff000;
7653 val |= 0x880;
7654 tw32(MAC_SERDES_CFG, val);
7655 }
7656 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7657 tw32(MAC_SERDES_CFG, 0x616000);
7658 }
7659
7660 /* Prevent chip from dropping frames when flow control
7661 * is enabled.
7662 */
7663 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7664
7665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7666 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7667 /* Use hardware link auto-negotiation */
7668 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7669 }
7670
Michael Chand4d2c552006-03-20 17:47:20 -08007671 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7672 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7673 u32 tmp;
7674
7675 tmp = tr32(SERDES_RX_CTRL);
7676 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7677 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7678 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7679 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7680 }
7681
Matt Carlsondd477002008-05-25 23:45:58 -07007682 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7683 if (tp->link_config.phy_is_low_power) {
7684 tp->link_config.phy_is_low_power = 0;
7685 tp->link_config.speed = tp->link_config.orig_speed;
7686 tp->link_config.duplex = tp->link_config.orig_duplex;
7687 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7688 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007689
Matt Carlsondd477002008-05-25 23:45:58 -07007690 err = tg3_setup_phy(tp, 0);
7691 if (err)
7692 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007693
Matt Carlsondd477002008-05-25 23:45:58 -07007694 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7695 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7696 u32 tmp;
7697
7698 /* Clear CRC stats. */
7699 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7700 tg3_writephy(tp, MII_TG3_TEST1,
7701 tmp | MII_TG3_TEST1_CRC_EN);
7702 tg3_readphy(tp, 0x14, &tmp);
7703 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007704 }
7705 }
7706
7707 __tg3_set_rx_mode(tp->dev);
7708
7709 /* Initialize receive rules. */
7710 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7711 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7712 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7713 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7714
Michael Chan4cf78e42005-07-25 12:29:19 -07007715 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007716 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007717 limit = 8;
7718 else
7719 limit = 16;
7720 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7721 limit -= 4;
7722 switch (limit) {
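	/* Each case intentionally falls through, clearing every unused receive
	 * rule/value pair from just below the limit on down.
	 */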
7723 case 16:
7724 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7725 case 15:
7726 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7727 case 14:
7728 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7729 case 13:
7730 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7731 case 12:
7732 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7733 case 11:
7734 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7735 case 10:
7736 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7737 case 9:
7738 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7739 case 8:
7740 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7741 case 7:
7742 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7743 case 6:
7744 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7745 case 5:
7746 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7747 case 4:
7748 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7749 case 3:
7750 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7751 case 2:
7752 case 1:
7753
7754 default:
7755 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007756 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007757
Matt Carlson9ce768e2007-10-11 19:49:11 -07007758 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7759 /* Write our heartbeat update interval to APE. */
7760 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7761 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007762
Linus Torvalds1da177e2005-04-16 15:20:36 -07007763 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7764
Linus Torvalds1da177e2005-04-16 15:20:36 -07007765 return 0;
7766}
7767
7768/* Called at device open time to get the chip ready for
7769 * packet processing. Invoked with tp->lock held.
7770 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007771static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007772{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007773 tg3_switch_clocks(tp);
7774
7775 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7776
Matt Carlson2f751b62008-08-04 23:17:34 -07007777 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007778}
7779
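/* Fold a 32-bit hardware counter (REG) into a 64-bit software counter kept as a
 * high/low pair; a wrap of the low word shows up as the sum becoming smaller than
 * the value just added, and carries into the high word.
 */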
7780#define TG3_STAT_ADD32(PSTAT, REG) \
7781do { u32 __val = tr32(REG); \
7782 (PSTAT)->low += __val; \
7783 if ((PSTAT)->low < __val) \
7784 (PSTAT)->high += 1; \
7785} while (0)
7786
7787static void tg3_periodic_fetch_stats(struct tg3 *tp)
7788{
7789 struct tg3_hw_stats *sp = tp->hw_stats;
7790
7791 if (!netif_carrier_ok(tp->dev))
7792 return;
7793
7794 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7795 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7796 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7797 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7798 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7799 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7800 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7801 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7802 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7803 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7804 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7805 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7806 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7807
7808 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7809 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7810 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7811 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7812 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7813 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7814 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7815 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7816 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7817 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7818 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7819 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7820 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7821 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007822
7823 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7824 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7825 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007826}
7827
7828static void tg3_timer(unsigned long __opaque)
7829{
7830 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007831
Michael Chanf475f162006-03-27 23:20:14 -08007832 if (tp->irq_sync)
7833 goto restart_timer;
7834
David S. Millerf47c11e2005-06-24 20:18:35 -07007835 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007836
David S. Millerfac9b832005-05-18 22:46:34 -07007837 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7838		/* All of this garbage is because, when using non-tagged
7839		 * IRQ status, the mailbox/status_block protocol the chip
7840		 * uses with the CPU is race prone.
7841 */
7842 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7843 tw32(GRC_LOCAL_CTRL,
7844 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7845 } else {
7846 tw32(HOSTCC_MODE, tp->coalesce_mode |
7847 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7848 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007849
David S. Millerfac9b832005-05-18 22:46:34 -07007850 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7851 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007852 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007853 schedule_work(&tp->reset_task);
7854 return;
7855 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856 }
7857
Linus Torvalds1da177e2005-04-16 15:20:36 -07007858 /* This part only runs once per second. */
7859 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007860 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7861 tg3_periodic_fetch_stats(tp);
7862
Linus Torvalds1da177e2005-04-16 15:20:36 -07007863 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7864 u32 mac_stat;
7865 int phy_event;
7866
7867 mac_stat = tr32(MAC_STATUS);
7868
7869 phy_event = 0;
7870 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7871 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7872 phy_event = 1;
7873 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7874 phy_event = 1;
7875
7876 if (phy_event)
7877 tg3_setup_phy(tp, 0);
7878 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7879 u32 mac_stat = tr32(MAC_STATUS);
7880 int need_setup = 0;
7881
7882 if (netif_carrier_ok(tp->dev) &&
7883 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7884 need_setup = 1;
7885 }
7886		if (!netif_carrier_ok(tp->dev) &&
7887 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7888 MAC_STATUS_SIGNAL_DET))) {
7889 need_setup = 1;
7890 }
7891 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007892 if (!tp->serdes_counter) {
7893 tw32_f(MAC_MODE,
7894 (tp->mac_mode &
7895 ~MAC_MODE_PORT_MODE_MASK));
7896 udelay(40);
7897 tw32_f(MAC_MODE, tp->mac_mode);
7898 udelay(40);
7899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900 tg3_setup_phy(tp, 0);
7901 }
Michael Chan747e8f82005-07-25 12:33:22 -07007902 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7903 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007904
7905 tp->timer_counter = tp->timer_multiplier;
7906 }
7907
Michael Chan130b8e42006-09-27 16:00:40 -07007908 /* Heartbeat is only sent once every 2 seconds.
7909 *
7910 * The heartbeat is to tell the ASF firmware that the host
7911 * driver is still alive. In the event that the OS crashes,
7912 * ASF needs to reset the hardware to free up the FIFO space
7913 * that may be filled with rx packets destined for the host.
7914 * If the FIFO is full, ASF will no longer function properly.
7915 *
7916 * Unintended resets have been reported on real time kernels
7917 * where the timer doesn't run on time. Netpoll will also have
7918	 * the same problem.
7919 *
7920 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7921 * to check the ring condition when the heartbeat is expiring
7922 * before doing the reset. This will prevent most unintended
7923 * resets.
7924 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007925 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007926 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7927 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007928 tg3_wait_for_event_ack(tp);
7929
Michael Chanbbadf502006-04-06 21:46:34 -07007930 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007931 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007932 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007933 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007934 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007935
7936 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007937 }
7938 tp->asf_counter = tp->asf_multiplier;
7939 }
7940
David S. Millerf47c11e2005-06-24 20:18:35 -07007941 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007942
Michael Chanf475f162006-03-27 23:20:14 -08007943restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007944 tp->timer.expires = jiffies + tp->timer_offset;
7945 add_timer(&tp->timer);
7946}
7947
Adrian Bunk81789ef2006-03-20 23:00:14 -08007948static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007949{
David Howells7d12e782006-10-05 14:55:46 +01007950 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007951 unsigned long flags;
7952 struct net_device *dev = tp->dev;
7953
7954 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7955 fn = tg3_msi;
7956 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7957 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007958 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007959 } else {
7960 fn = tg3_interrupt;
7961 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7962 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007963 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007964 }
7965 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7966}
7967
Michael Chan79381092005-04-21 17:13:59 -07007968static int tg3_test_interrupt(struct tg3 *tp)
7969{
7970 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07007971 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07007972
Michael Chand4bc3922005-05-29 14:59:20 -07007973 if (!netif_running(dev))
7974 return -ENODEV;
7975
Michael Chan79381092005-04-21 17:13:59 -07007976 tg3_disable_ints(tp);
7977
7978 free_irq(tp->pdev->irq, dev);
7979
7980 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007981 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07007982 if (err)
7983 return err;
7984
Michael Chan38f38432005-09-05 17:53:32 -07007985 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07007986 tg3_enable_ints(tp);
7987
7988 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7989 HOSTCC_MODE_NOW);
7990
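	/* Poll for up to ~50 ms: the test ISR either leaves a non-zero value in the
	 * interrupt mailbox or masks further PCI interrupts in MISC_HOST_CTRL; either
	 * one counts as a delivered interrupt.
	 */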
7991 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07007992 u32 int_mbox, misc_host_ctrl;
7993
Michael Chan09ee9292005-08-09 20:17:00 -07007994 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7995 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07007996 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7997
7998 if ((int_mbox != 0) ||
7999 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8000 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07008001 break;
Michael Chanb16250e2006-09-27 16:10:14 -07008002 }
8003
Michael Chan79381092005-04-21 17:13:59 -07008004 msleep(10);
8005 }
8006
8007 tg3_disable_ints(tp);
8008
8009 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008010
Michael Chanfcfa0a32006-03-20 22:28:41 -08008011 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008012
8013 if (err)
8014 return err;
8015
Michael Chanb16250e2006-09-27 16:10:14 -07008016 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07008017 return 0;
8018
8019 return -EIO;
8020}
8021
8022/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8023 * successfully restored
8024 */
8025static int tg3_test_msi(struct tg3 *tp)
8026{
8027 struct net_device *dev = tp->dev;
8028 int err;
8029 u16 pci_cmd;
8030
8031 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8032 return 0;
8033
8034 /* Turn off SERR reporting in case MSI terminates with Master
8035 * Abort.
8036 */
8037 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8038 pci_write_config_word(tp->pdev, PCI_COMMAND,
8039 pci_cmd & ~PCI_COMMAND_SERR);
8040
8041 err = tg3_test_interrupt(tp);
8042
8043 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8044
8045 if (!err)
8046 return 0;
8047
8048 /* other failures */
8049 if (err != -EIO)
8050 return err;
8051
8052 /* MSI test failed, go back to INTx mode */
8053 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8054 "switching to INTx mode. Please report this failure to "
8055 "the PCI maintainer and include system chipset information.\n",
8056 tp->dev->name);
8057
8058 free_irq(tp->pdev->irq, dev);
8059 pci_disable_msi(tp->pdev);
8060
8061 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8062
Michael Chanfcfa0a32006-03-20 22:28:41 -08008063 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008064 if (err)
8065 return err;
8066
8067 /* Need to reset the chip because the MSI cycle may have terminated
8068 * with Master Abort.
8069 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008070 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008071
Michael Chan944d9802005-05-29 14:57:48 -07008072 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008073 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008074
David S. Millerf47c11e2005-06-24 20:18:35 -07008075 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008076
8077 if (err)
8078 free_irq(tp->pdev->irq, dev);
8079
8080 return err;
8081}
8082
Linus Torvalds1da177e2005-04-16 15:20:36 -07008083static int tg3_open(struct net_device *dev)
8084{
8085 struct tg3 *tp = netdev_priv(dev);
8086 int err;
8087
Michael Chanc49a1562006-12-17 17:07:29 -08008088 netif_carrier_off(tp->dev);
8089
Michael Chanbc1c7562006-03-20 17:48:03 -08008090 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008091 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008092 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008093
8094 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008095
Linus Torvalds1da177e2005-04-16 15:20:36 -07008096 tg3_disable_ints(tp);
8097 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8098
David S. Millerf47c11e2005-06-24 20:18:35 -07008099 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008100
8101 /* The placement of this call is tied
8102 * to the setup and use of Host TX descriptors.
8103 */
8104 err = tg3_alloc_consistent(tp);
8105 if (err)
8106 return err;
8107
Michael Chan7544b092007-05-05 13:08:32 -07008108 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008109		/* All MSI-supporting chips should support tagged
8110 * status. Assert that this is the case.
8111 */
8112 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8113 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8114 "Not using MSI.\n", tp->dev->name);
8115 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008116 u32 msi_mode;
8117
8118 msi_mode = tr32(MSGINT_MODE);
8119 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8120 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8121 }
8122 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008123 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008124
8125 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07008126 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8127 pci_disable_msi(tp->pdev);
8128 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8129 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008130 tg3_free_consistent(tp);
8131 return err;
8132 }
8133
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008134 napi_enable(&tp->napi);
8135
David S. Millerf47c11e2005-06-24 20:18:35 -07008136 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008137
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008138 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008139 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008140 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008141 tg3_free_rings(tp);
8142 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07008143 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8144 tp->timer_offset = HZ;
8145 else
8146 tp->timer_offset = HZ / 10;
8147
8148 BUG_ON(tp->timer_offset > HZ);
8149 tp->timer_counter = tp->timer_multiplier =
8150 (HZ / tp->timer_offset);
8151 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008152 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008153
8154 init_timer(&tp->timer);
8155 tp->timer.expires = jiffies + tp->timer_offset;
8156 tp->timer.data = (unsigned long) tp;
8157 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008158 }
8159
David S. Millerf47c11e2005-06-24 20:18:35 -07008160 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008161
8162 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008163 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07008164 free_irq(tp->pdev->irq, dev);
8165 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8166 pci_disable_msi(tp->pdev);
8167 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8168 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008169 tg3_free_consistent(tp);
8170 return err;
8171 }
8172
Michael Chan79381092005-04-21 17:13:59 -07008173 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8174 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008175
Michael Chan79381092005-04-21 17:13:59 -07008176 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008177 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008178
8179 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8180 pci_disable_msi(tp->pdev);
8181 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8182 }
Michael Chan944d9802005-05-29 14:57:48 -07008183 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008184 tg3_free_rings(tp);
8185 tg3_free_consistent(tp);
8186
David S. Millerf47c11e2005-06-24 20:18:35 -07008187 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008188
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008189 napi_disable(&tp->napi);
8190
Michael Chan79381092005-04-21 17:13:59 -07008191 return err;
8192 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008193
8194 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8195 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008196 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008197
Michael Chanb5d37722006-09-27 16:06:21 -07008198 tw32(PCIE_TRANSACTION_CFG,
8199 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008200 }
8201 }
Michael Chan79381092005-04-21 17:13:59 -07008202 }
8203
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008204 tg3_phy_start(tp);
8205
David S. Millerf47c11e2005-06-24 20:18:35 -07008206 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008207
Michael Chan79381092005-04-21 17:13:59 -07008208 add_timer(&tp->timer);
8209 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008210 tg3_enable_ints(tp);
8211
David S. Millerf47c11e2005-06-24 20:18:35 -07008212 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008213
8214 netif_start_queue(dev);
8215
8216 return 0;
8217}
8218
8219#if 0
8220/*static*/ void tg3_dump_state(struct tg3 *tp)
8221{
8222 u32 val32, val32_2, val32_3, val32_4, val32_5;
8223 u16 val16;
8224 int i;
8225
8226 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8227 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8228 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8229 val16, val32);
8230
8231 /* MAC block */
8232 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8233 tr32(MAC_MODE), tr32(MAC_STATUS));
8234 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8235 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8236 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8237 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8238 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8239 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8240
8241 /* Send data initiator control block */
8242 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8243 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8244 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8245 tr32(SNDDATAI_STATSCTRL));
8246
8247 /* Send data completion control block */
8248 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8249
8250 /* Send BD ring selector block */
8251 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8252 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8253
8254 /* Send BD initiator control block */
8255 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8256 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8257
8258 /* Send BD completion control block */
8259 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8260
8261 /* Receive list placement control block */
8262 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8263 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8264 printk(" RCVLPC_STATSCTRL[%08x]\n",
8265 tr32(RCVLPC_STATSCTRL));
8266
8267 /* Receive data and receive BD initiator control block */
8268 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8269 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8270
8271 /* Receive data completion control block */
8272 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8273 tr32(RCVDCC_MODE));
8274
8275 /* Receive BD initiator control block */
8276 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8277 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8278
8279 /* Receive BD completion control block */
8280 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8281 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8282
8283 /* Receive list selector control block */
8284 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8285 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8286
8287 /* Mbuf cluster free block */
8288 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8289 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8290
8291 /* Host coalescing control block */
8292 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8293 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8294 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8295 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8296 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8297 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8298 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8299 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8300 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8301 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8302 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8303 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8304
8305 /* Memory arbiter control block */
8306 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8307 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8308
8309 /* Buffer manager control block */
8310 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8311 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8312 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8313 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8314 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8315 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8316 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8317 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8318
8319 /* Read DMA control block */
8320 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8321 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8322
8323 /* Write DMA control block */
8324 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8325 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8326
8327 /* DMA completion block */
8328 printk("DEBUG: DMAC_MODE[%08x]\n",
8329 tr32(DMAC_MODE));
8330
8331 /* GRC block */
8332 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8333 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8334 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8335 tr32(GRC_LOCAL_CTRL));
8336
8337 /* TG3_BDINFOs */
8338 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8339 tr32(RCVDBDI_JUMBO_BD + 0x0),
8340 tr32(RCVDBDI_JUMBO_BD + 0x4),
8341 tr32(RCVDBDI_JUMBO_BD + 0x8),
8342 tr32(RCVDBDI_JUMBO_BD + 0xc));
8343 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8344 tr32(RCVDBDI_STD_BD + 0x0),
8345 tr32(RCVDBDI_STD_BD + 0x4),
8346 tr32(RCVDBDI_STD_BD + 0x8),
8347 tr32(RCVDBDI_STD_BD + 0xc));
8348 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8349 tr32(RCVDBDI_MINI_BD + 0x0),
8350 tr32(RCVDBDI_MINI_BD + 0x4),
8351 tr32(RCVDBDI_MINI_BD + 0x8),
8352 tr32(RCVDBDI_MINI_BD + 0xc));
8353
8354 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8355 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8356 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8357 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8358 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8359 val32, val32_2, val32_3, val32_4);
8360
8361 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8362 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8363 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8364 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8365 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8366 val32, val32_2, val32_3, val32_4);
8367
8368 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8369 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8370 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8371 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8372 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8373 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8374 val32, val32_2, val32_3, val32_4, val32_5);
8375
8376 /* SW status block */
8377 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8378 tp->hw_status->status,
8379 tp->hw_status->status_tag,
8380 tp->hw_status->rx_jumbo_consumer,
8381 tp->hw_status->rx_consumer,
8382 tp->hw_status->rx_mini_consumer,
8383 tp->hw_status->idx[0].rx_producer,
8384 tp->hw_status->idx[0].tx_consumer);
8385
8386 /* SW statistics block */
8387 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8388 ((u32 *)tp->hw_stats)[0],
8389 ((u32 *)tp->hw_stats)[1],
8390 ((u32 *)tp->hw_stats)[2],
8391 ((u32 *)tp->hw_stats)[3]);
8392
8393 /* Mailboxes */
8394 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008395 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8396 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8397 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8398 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008399
8400 /* NIC side send descriptors. */
8401 for (i = 0; i < 6; i++) {
8402 unsigned long txd;
8403
8404 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8405 + (i * sizeof(struct tg3_tx_buffer_desc));
8406 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8407 i,
8408 readl(txd + 0x0), readl(txd + 0x4),
8409 readl(txd + 0x8), readl(txd + 0xc));
8410 }
8411
8412 /* NIC side RX descriptors. */
8413 for (i = 0; i < 6; i++) {
8414 unsigned long rxd;
8415
8416 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8417 + (i * sizeof(struct tg3_rx_buffer_desc));
8418 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8419 i,
8420 readl(rxd + 0x0), readl(rxd + 0x4),
8421 readl(rxd + 0x8), readl(rxd + 0xc));
8422 rxd += (4 * sizeof(u32));
8423 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8424 i,
8425 readl(rxd + 0x0), readl(rxd + 0x4),
8426 readl(rxd + 0x8), readl(rxd + 0xc));
8427 }
8428
8429 for (i = 0; i < 6; i++) {
8430 unsigned long rxd;
8431
8432 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8433 + (i * sizeof(struct tg3_rx_buffer_desc));
8434 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8435 i,
8436 readl(rxd + 0x0), readl(rxd + 0x4),
8437 readl(rxd + 0x8), readl(rxd + 0xc));
8438 rxd += (4 * sizeof(u32));
8439 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8440 i,
8441 readl(rxd + 0x0), readl(rxd + 0x4),
8442 readl(rxd + 0x8), readl(rxd + 0xc));
8443 }
8444}
8445#endif
8446
8447static struct net_device_stats *tg3_get_stats(struct net_device *);
8448static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8449
8450static int tg3_close(struct net_device *dev)
8451{
8452 struct tg3 *tp = netdev_priv(dev);
8453
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008454 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008455 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008456
Linus Torvalds1da177e2005-04-16 15:20:36 -07008457 netif_stop_queue(dev);
8458
8459 del_timer_sync(&tp->timer);
8460
David S. Millerf47c11e2005-06-24 20:18:35 -07008461 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008462#if 0
8463 tg3_dump_state(tp);
8464#endif
8465
8466 tg3_disable_ints(tp);
8467
Michael Chan944d9802005-05-29 14:57:48 -07008468 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008469 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008470 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008471
David S. Millerf47c11e2005-06-24 20:18:35 -07008472 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008473
Michael Chan88b06bc2005-04-21 17:13:25 -07008474 free_irq(tp->pdev->irq, dev);
8475 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8476 pci_disable_msi(tp->pdev);
8477 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8478 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008479
8480 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8481 sizeof(tp->net_stats_prev));
8482 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8483 sizeof(tp->estats_prev));
8484
8485 tg3_free_consistent(tp);
8486
Michael Chanbc1c7562006-03-20 17:48:03 -08008487 tg3_set_power_state(tp, PCI_D3hot);
8488
8489 netif_carrier_off(tp->dev);
8490
Linus Torvalds1da177e2005-04-16 15:20:36 -07008491 return 0;
8492}
8493
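/* Hardware statistics are kept as 64-bit {high, low} register pairs.
 * get_stat64() folds a pair into an unsigned long, which truncates to the
 * low 32 bits on 32-bit hosts; get_estat64() below always returns the full
 * 64-bit value for the ethtool statistics.
 */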
8494static inline unsigned long get_stat64(tg3_stat64_t *val)
8495{
8496 unsigned long ret;
8497
8498#if (BITS_PER_LONG == 32)
8499 ret = val->low;
8500#else
8501 ret = ((u64)val->high << 32) | ((u64)val->low);
8502#endif
8503 return ret;
8504}
8505
Stefan Buehler816f8b82008-08-15 14:10:54 -07008506static inline u64 get_estat64(tg3_stat64_t *val)
8507{
8508 return ((u64)val->high << 32) | ((u64)val->low);
8509}
8510
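/* On 5700 / 5701 with a copper PHY, the CRC error count is read from PHY
 * test register 0x14 (after enabling MII_TG3_TEST1_CRC_EN) and accumulated
 * in software; all other configurations report the MAC's rx_fcs_errors
 * hardware counter directly.
 */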
Linus Torvalds1da177e2005-04-16 15:20:36 -07008511static unsigned long calc_crc_errors(struct tg3 *tp)
8512{
8513 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8514
8515 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8516 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008518 u32 val;
8519
David S. Millerf47c11e2005-06-24 20:18:35 -07008520 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008521 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8522 tg3_writephy(tp, MII_TG3_TEST1,
8523 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008524 tg3_readphy(tp, 0x14, &val);
8525 } else
8526 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008527 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008528
8529 tp->phy_crc_errors += val;
8530
8531 return tp->phy_crc_errors;
8532 }
8533
8534 return get_stat64(&hw_stats->rx_fcs_errors);
8535}
8536
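/* Accumulate one ethtool counter: the running total is the value saved in
 * estats_prev at the last close plus the full 64-bit hardware counter read
 * from the current statistics block.
 */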
8537#define ESTAT_ADD(member) \
8538 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008539 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008540
8541static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8542{
8543 struct tg3_ethtool_stats *estats = &tp->estats;
8544 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8545 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8546
8547 if (!hw_stats)
8548 return old_estats;
8549
8550 ESTAT_ADD(rx_octets);
8551 ESTAT_ADD(rx_fragments);
8552 ESTAT_ADD(rx_ucast_packets);
8553 ESTAT_ADD(rx_mcast_packets);
8554 ESTAT_ADD(rx_bcast_packets);
8555 ESTAT_ADD(rx_fcs_errors);
8556 ESTAT_ADD(rx_align_errors);
8557 ESTAT_ADD(rx_xon_pause_rcvd);
8558 ESTAT_ADD(rx_xoff_pause_rcvd);
8559 ESTAT_ADD(rx_mac_ctrl_rcvd);
8560 ESTAT_ADD(rx_xoff_entered);
8561 ESTAT_ADD(rx_frame_too_long_errors);
8562 ESTAT_ADD(rx_jabbers);
8563 ESTAT_ADD(rx_undersize_packets);
8564 ESTAT_ADD(rx_in_length_errors);
8565 ESTAT_ADD(rx_out_length_errors);
8566 ESTAT_ADD(rx_64_or_less_octet_packets);
8567 ESTAT_ADD(rx_65_to_127_octet_packets);
8568 ESTAT_ADD(rx_128_to_255_octet_packets);
8569 ESTAT_ADD(rx_256_to_511_octet_packets);
8570 ESTAT_ADD(rx_512_to_1023_octet_packets);
8571 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8572 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8573 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8574 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8575 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8576
8577 ESTAT_ADD(tx_octets);
8578 ESTAT_ADD(tx_collisions);
8579 ESTAT_ADD(tx_xon_sent);
8580 ESTAT_ADD(tx_xoff_sent);
8581 ESTAT_ADD(tx_flow_control);
8582 ESTAT_ADD(tx_mac_errors);
8583 ESTAT_ADD(tx_single_collisions);
8584 ESTAT_ADD(tx_mult_collisions);
8585 ESTAT_ADD(tx_deferred);
8586 ESTAT_ADD(tx_excessive_collisions);
8587 ESTAT_ADD(tx_late_collisions);
8588 ESTAT_ADD(tx_collide_2times);
8589 ESTAT_ADD(tx_collide_3times);
8590 ESTAT_ADD(tx_collide_4times);
8591 ESTAT_ADD(tx_collide_5times);
8592 ESTAT_ADD(tx_collide_6times);
8593 ESTAT_ADD(tx_collide_7times);
8594 ESTAT_ADD(tx_collide_8times);
8595 ESTAT_ADD(tx_collide_9times);
8596 ESTAT_ADD(tx_collide_10times);
8597 ESTAT_ADD(tx_collide_11times);
8598 ESTAT_ADD(tx_collide_12times);
8599 ESTAT_ADD(tx_collide_13times);
8600 ESTAT_ADD(tx_collide_14times);
8601 ESTAT_ADD(tx_collide_15times);
8602 ESTAT_ADD(tx_ucast_packets);
8603 ESTAT_ADD(tx_mcast_packets);
8604 ESTAT_ADD(tx_bcast_packets);
8605 ESTAT_ADD(tx_carrier_sense_errors);
8606 ESTAT_ADD(tx_discards);
8607 ESTAT_ADD(tx_errors);
8608
8609 ESTAT_ADD(dma_writeq_full);
8610 ESTAT_ADD(dma_write_prioq_full);
8611 ESTAT_ADD(rxbds_empty);
8612 ESTAT_ADD(rx_discards);
8613 ESTAT_ADD(rx_errors);
8614 ESTAT_ADD(rx_threshold_hit);
8615
8616 ESTAT_ADD(dma_readq_full);
8617 ESTAT_ADD(dma_read_prioq_full);
8618 ESTAT_ADD(tx_comp_queue_full);
8619
8620 ESTAT_ADD(ring_set_send_prod_index);
8621 ESTAT_ADD(ring_status_update);
8622 ESTAT_ADD(nic_irqs);
8623 ESTAT_ADD(nic_avoided_irqs);
8624 ESTAT_ADD(nic_tx_threshold_hit);
8625
8626 return estats;
8627}
8628
8629static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8630{
8631 struct tg3 *tp = netdev_priv(dev);
8632 struct net_device_stats *stats = &tp->net_stats;
8633 struct net_device_stats *old_stats = &tp->net_stats_prev;
8634 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8635
8636 if (!hw_stats)
8637 return old_stats;
8638
8639 stats->rx_packets = old_stats->rx_packets +
8640 get_stat64(&hw_stats->rx_ucast_packets) +
8641 get_stat64(&hw_stats->rx_mcast_packets) +
8642 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008643
Linus Torvalds1da177e2005-04-16 15:20:36 -07008644 stats->tx_packets = old_stats->tx_packets +
8645 get_stat64(&hw_stats->tx_ucast_packets) +
8646 get_stat64(&hw_stats->tx_mcast_packets) +
8647 get_stat64(&hw_stats->tx_bcast_packets);
8648
8649 stats->rx_bytes = old_stats->rx_bytes +
8650 get_stat64(&hw_stats->rx_octets);
8651 stats->tx_bytes = old_stats->tx_bytes +
8652 get_stat64(&hw_stats->tx_octets);
8653
8654 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008655 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008656 stats->tx_errors = old_stats->tx_errors +
8657 get_stat64(&hw_stats->tx_errors) +
8658 get_stat64(&hw_stats->tx_mac_errors) +
8659 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8660 get_stat64(&hw_stats->tx_discards);
8661
8662 stats->multicast = old_stats->multicast +
8663 get_stat64(&hw_stats->rx_mcast_packets);
8664 stats->collisions = old_stats->collisions +
8665 get_stat64(&hw_stats->tx_collisions);
8666
8667 stats->rx_length_errors = old_stats->rx_length_errors +
8668 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8669 get_stat64(&hw_stats->rx_undersize_packets);
8670
8671 stats->rx_over_errors = old_stats->rx_over_errors +
8672 get_stat64(&hw_stats->rxbds_empty);
8673 stats->rx_frame_errors = old_stats->rx_frame_errors +
8674 get_stat64(&hw_stats->rx_align_errors);
8675 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8676 get_stat64(&hw_stats->tx_discards);
8677 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8678 get_stat64(&hw_stats->tx_carrier_sense_errors);
8679
8680 stats->rx_crc_errors = old_stats->rx_crc_errors +
8681 calc_crc_errors(tp);
8682
John W. Linville4f63b872005-09-12 14:43:18 -07008683 stats->rx_missed_errors = old_stats->rx_missed_errors +
8684 get_stat64(&hw_stats->rx_discards);
8685
Linus Torvalds1da177e2005-04-16 15:20:36 -07008686 return stats;
8687}
8688
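/* Bit-by-bit CRC-32 (reflected, polynomial 0xedb88320) over 'len' bytes,
 * LSB first, initialized to all ones and inverted at the end -- presumably
 * the same CRC the MAC applies when hashing multicast addresses for the
 * MAC_HASH_REG_* filter below.
 */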
8689static inline u32 calc_crc(unsigned char *buf, int len)
8690{
8691 u32 reg;
8692 u32 tmp;
8693 int j, k;
8694
8695 reg = 0xffffffff;
8696
8697 for (j = 0; j < len; j++) {
8698 reg ^= buf[j];
8699
8700 for (k = 0; k < 8; k++) {
8701 tmp = reg & 0x01;
8702
8703 reg >>= 1;
8704
8705 if (tmp) {
8706 reg ^= 0xedb88320;
8707 }
8708 }
8709 }
8710
8711 return ~reg;
8712}
8713
8714static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8715{
8716 /* accept or reject all multicast frames */
8717 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8718 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8719 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8720 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8721}
8722
8723static void __tg3_set_rx_mode(struct net_device *dev)
8724{
8725 struct tg3 *tp = netdev_priv(dev);
8726 u32 rx_mode;
8727
8728 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8729 RX_MODE_KEEP_VLAN_TAG);
8730
8731 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8732 * flag clear.
8733 */
8734#if TG3_VLAN_TAG_USED
8735 if (!tp->vlgrp &&
8736 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8737 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8738#else
	8739	/* By definition, VLAN is always disabled in this
8740 * case.
8741 */
8742 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8743 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8744#endif
8745
8746 if (dev->flags & IFF_PROMISC) {
8747 /* Promiscuous mode. */
8748 rx_mode |= RX_MODE_PROMISC;
8749 } else if (dev->flags & IFF_ALLMULTI) {
8750 /* Accept all multicast. */
8751 tg3_set_multi (tp, 1);
8752 } else if (dev->mc_count < 1) {
8753 /* Reject all multicast. */
8754 tg3_set_multi (tp, 0);
8755 } else {
8756 /* Accept one or more multicast(s). */
8757 struct dev_mc_list *mclist;
8758 unsigned int i;
8759 u32 mc_filter[4] = { 0, };
8760 u32 regidx;
8761 u32 bit;
8762 u32 crc;
8763
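		/* Hash each multicast address into the 128-bit filter: take
		 * the CRC-32 of the address, complement the low seven bits,
		 * use the top two of those bits to pick one of the four
		 * 32-bit hash registers and the low five as the bit position
		 * within it.
		 */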
8764 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8765 i++, mclist = mclist->next) {
8766
8767 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8768 bit = ~crc & 0x7f;
8769 regidx = (bit & 0x60) >> 5;
8770 bit &= 0x1f;
8771 mc_filter[regidx] |= (1 << bit);
8772 }
8773
8774 tw32(MAC_HASH_REG_0, mc_filter[0]);
8775 tw32(MAC_HASH_REG_1, mc_filter[1]);
8776 tw32(MAC_HASH_REG_2, mc_filter[2]);
8777 tw32(MAC_HASH_REG_3, mc_filter[3]);
8778 }
8779
8780 if (rx_mode != tp->rx_mode) {
8781 tp->rx_mode = rx_mode;
8782 tw32_f(MAC_RX_MODE, rx_mode);
8783 udelay(10);
8784 }
8785}
8786
8787static void tg3_set_rx_mode(struct net_device *dev)
8788{
8789 struct tg3 *tp = netdev_priv(dev);
8790
Michael Chane75f7c92006-03-20 21:33:26 -08008791 if (!netif_running(dev))
8792 return;
8793
David S. Millerf47c11e2005-06-24 20:18:35 -07008794 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008795 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008796 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008797}
8798
8799#define TG3_REGDUMP_LEN (32 * 1024)
8800
8801static int tg3_get_regs_len(struct net_device *dev)
8802{
8803 return TG3_REGDUMP_LEN;
8804}
8805
8806static void tg3_get_regs(struct net_device *dev,
8807 struct ethtool_regs *regs, void *_p)
8808{
8809 u32 *p = _p;
8810 struct tg3 *tp = netdev_priv(dev);
8811 u8 *orig_p = _p;
8812 int i;
8813
8814 regs->version = 0;
8815
8816 memset(p, 0, TG3_REGDUMP_LEN);
8817
Michael Chanbc1c7562006-03-20 17:48:03 -08008818 if (tp->link_config.phy_is_low_power)
8819 return;
8820
David S. Millerf47c11e2005-06-24 20:18:35 -07008821 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008822
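/* Each helper below copies a block of registers into the dump buffer at its
 * native hardware offset (p is repositioned to orig_p + base first), so the
 * 32 KB ethtool register dump mirrors the device's register map with unread
 * ranges left as zeros.
 */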
8823#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8824#define GET_REG32_LOOP(base,len) \
8825do { p = (u32 *)(orig_p + (base)); \
8826 for (i = 0; i < len; i += 4) \
8827 __GET_REG32((base) + i); \
8828} while (0)
8829#define GET_REG32_1(reg) \
8830do { p = (u32 *)(orig_p + (reg)); \
8831 __GET_REG32((reg)); \
8832} while (0)
8833
8834 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8835 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8836 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8837 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8838 GET_REG32_1(SNDDATAC_MODE);
8839 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8840 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8841 GET_REG32_1(SNDBDC_MODE);
8842 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8843 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8844 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8845 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8846 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8847 GET_REG32_1(RCVDCC_MODE);
8848 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8849 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8850 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8851 GET_REG32_1(MBFREE_MODE);
8852 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8853 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8854 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8855 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8856 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008857 GET_REG32_1(RX_CPU_MODE);
8858 GET_REG32_1(RX_CPU_STATE);
8859 GET_REG32_1(RX_CPU_PGMCTR);
8860 GET_REG32_1(RX_CPU_HWBKPT);
8861 GET_REG32_1(TX_CPU_MODE);
8862 GET_REG32_1(TX_CPU_STATE);
8863 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008864 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8865 GET_REG32_LOOP(FTQ_RESET, 0x120);
8866 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8867 GET_REG32_1(DMAC_MODE);
8868 GET_REG32_LOOP(GRC_MODE, 0x4c);
8869 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8870 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8871
8872#undef __GET_REG32
8873#undef GET_REG32_LOOP
8874#undef GET_REG32_1
8875
David S. Millerf47c11e2005-06-24 20:18:35 -07008876 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008877}
8878
8879static int tg3_get_eeprom_len(struct net_device *dev)
8880{
8881 struct tg3 *tp = netdev_priv(dev);
8882
8883 return tp->nvram_size;
8884}
8885
8886static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008887static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008888static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008889
8890static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8891{
8892 struct tg3 *tp = netdev_priv(dev);
8893 int ret;
8894 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008895 u32 i, offset, len, b_offset, b_count;
8896 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008897
Michael Chanbc1c7562006-03-20 17:48:03 -08008898 if (tp->link_config.phy_is_low_power)
8899 return -EAGAIN;
8900
Linus Torvalds1da177e2005-04-16 15:20:36 -07008901 offset = eeprom->offset;
8902 len = eeprom->len;
8903 eeprom->len = 0;
8904
8905 eeprom->magic = TG3_EEPROM_MAGIC;
8906
8907 if (offset & 3) {
8908 /* adjustments to start on required 4 byte boundary */
8909 b_offset = offset & 3;
8910 b_count = 4 - b_offset;
8911 if (b_count > len) {
8912 /* i.e. offset=1 len=2 */
8913 b_count = len;
8914 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008915 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008916 if (ret)
8917 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008918 memcpy(data, ((char*)&val) + b_offset, b_count);
8919 len -= b_count;
8920 offset += b_count;
8921 eeprom->len += b_count;
8922 }
8923
	8924	/* read bytes up to the last 4 byte boundary */
8925 pd = &data[eeprom->len];
8926 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008927 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008928 if (ret) {
8929 eeprom->len += i;
8930 return ret;
8931 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008932 memcpy(pd + i, &val, 4);
8933 }
8934 eeprom->len += i;
8935
8936 if (len & 3) {
8937 /* read last bytes not ending on 4 byte boundary */
8938 pd = &data[eeprom->len];
8939 b_count = len & 3;
8940 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008941 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008942 if (ret)
8943 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008944 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008945 eeprom->len += b_count;
8946 }
8947 return 0;
8948}
8949
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008950static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008951
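/* NVRAM writes operate on 4-byte words, so requests that start or end off a
 * word boundary are first widened: the neighbouring words are read back and
 * merged into a temporary buffer before tg3_nvram_write_block() is called.
 */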
8952static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8953{
8954 struct tg3 *tp = netdev_priv(dev);
8955 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008956 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008957 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08008958 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008959
Michael Chanbc1c7562006-03-20 17:48:03 -08008960 if (tp->link_config.phy_is_low_power)
8961 return -EAGAIN;
8962
Linus Torvalds1da177e2005-04-16 15:20:36 -07008963 if (eeprom->magic != TG3_EEPROM_MAGIC)
8964 return -EINVAL;
8965
8966 offset = eeprom->offset;
8967 len = eeprom->len;
8968
8969 if ((b_offset = (offset & 3))) {
8970 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08008971 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008972 if (ret)
8973 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008974 len += b_offset;
8975 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07008976 if (len < 4)
8977 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008978 }
8979
8980 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07008981 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008982 /* adjustments to end on required 4 byte boundary */
8983 odd_len = 1;
8984 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08008985 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008986 if (ret)
8987 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008988 }
8989
8990 buf = data;
8991 if (b_offset || odd_len) {
8992 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008993 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008994 return -ENOMEM;
8995 if (b_offset)
8996 memcpy(buf, &start, 4);
8997 if (odd_len)
8998 memcpy(buf+len-4, &end, 4);
8999 memcpy(buf + b_offset, data, eeprom->len);
9000 }
9001
9002 ret = tg3_nvram_write_block(tp, offset, len, buf);
9003
9004 if (buf != data)
9005 kfree(buf);
9006
9007 return ret;
9008}
9009
9010static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9011{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009012 struct tg3 *tp = netdev_priv(dev);
9013
9014 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9015 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9016 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009017 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009018 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009019
Linus Torvalds1da177e2005-04-16 15:20:36 -07009020 cmd->supported = (SUPPORTED_Autoneg);
9021
9022 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9023 cmd->supported |= (SUPPORTED_1000baseT_Half |
9024 SUPPORTED_1000baseT_Full);
9025
Karsten Keilef348142006-05-12 12:49:08 -07009026 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009027 cmd->supported |= (SUPPORTED_100baseT_Half |
9028 SUPPORTED_100baseT_Full |
9029 SUPPORTED_10baseT_Half |
9030 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08009031 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07009032 cmd->port = PORT_TP;
9033 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009034 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07009035 cmd->port = PORT_FIBRE;
9036 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009037
Linus Torvalds1da177e2005-04-16 15:20:36 -07009038 cmd->advertising = tp->link_config.advertising;
9039 if (netif_running(dev)) {
9040 cmd->speed = tp->link_config.active_speed;
9041 cmd->duplex = tp->link_config.active_duplex;
9042 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009043 cmd->phy_address = PHY_ADDR;
9044 cmd->transceiver = 0;
9045 cmd->autoneg = tp->link_config.autoneg;
9046 cmd->maxtxpkt = 0;
9047 cmd->maxrxpkt = 0;
9048 return 0;
9049}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009050
Linus Torvalds1da177e2005-04-16 15:20:36 -07009051static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9052{
9053 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009054
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009055 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9056 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9057 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009058 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009059 }
9060
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009061 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009062 /* These are the only valid advertisement bits allowed. */
9063 if (cmd->autoneg == AUTONEG_ENABLE &&
9064 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9065 ADVERTISED_1000baseT_Full |
9066 ADVERTISED_Autoneg |
9067 ADVERTISED_FIBRE)))
9068 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009069 /* Fiber can only do SPEED_1000. */
9070 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9071 (cmd->speed != SPEED_1000))
9072 return -EINVAL;
9073 /* Copper cannot force SPEED_1000. */
9074 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9075 (cmd->speed == SPEED_1000))
9076 return -EINVAL;
9077 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009078 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009079 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009080
David S. Millerf47c11e2005-06-24 20:18:35 -07009081 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009082
9083 tp->link_config.autoneg = cmd->autoneg;
9084 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009085 tp->link_config.advertising = (cmd->advertising |
9086 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009087 tp->link_config.speed = SPEED_INVALID;
9088 tp->link_config.duplex = DUPLEX_INVALID;
9089 } else {
9090 tp->link_config.advertising = 0;
9091 tp->link_config.speed = cmd->speed;
9092 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009093 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009094
Michael Chan24fcad62006-12-17 17:06:46 -08009095 tp->link_config.orig_speed = tp->link_config.speed;
9096 tp->link_config.orig_duplex = tp->link_config.duplex;
9097 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9098
Linus Torvalds1da177e2005-04-16 15:20:36 -07009099 if (netif_running(dev))
9100 tg3_setup_phy(tp, 1);
9101
David S. Millerf47c11e2005-06-24 20:18:35 -07009102 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009103
Linus Torvalds1da177e2005-04-16 15:20:36 -07009104 return 0;
9105}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009106
Linus Torvalds1da177e2005-04-16 15:20:36 -07009107static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9108{
9109 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009110
Linus Torvalds1da177e2005-04-16 15:20:36 -07009111 strcpy(info->driver, DRV_MODULE_NAME);
9112 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009113 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009114 strcpy(info->bus_info, pci_name(tp->pdev));
9115}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009116
Linus Torvalds1da177e2005-04-16 15:20:36 -07009117static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9118{
9119 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009120
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009121 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9122 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009123 wol->supported = WAKE_MAGIC;
9124 else
9125 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009126 wol->wolopts = 0;
Matt Carlson05ac4cb2008-11-03 16:53:46 -08009127 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9128 device_can_wakeup(&tp->pdev->dev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009129 wol->wolopts = WAKE_MAGIC;
9130 memset(&wol->sopass, 0, sizeof(wol->sopass));
9131}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009132
Linus Torvalds1da177e2005-04-16 15:20:36 -07009133static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9134{
9135 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009136 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009137
Linus Torvalds1da177e2005-04-16 15:20:36 -07009138 if (wol->wolopts & ~WAKE_MAGIC)
9139 return -EINVAL;
9140 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009141 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009142 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009143
David S. Millerf47c11e2005-06-24 20:18:35 -07009144 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009145 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009146 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009147 device_set_wakeup_enable(dp, true);
9148 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009149 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009150 device_set_wakeup_enable(dp, false);
9151 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009152 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009153
Linus Torvalds1da177e2005-04-16 15:20:36 -07009154 return 0;
9155}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009156
Linus Torvalds1da177e2005-04-16 15:20:36 -07009157static u32 tg3_get_msglevel(struct net_device *dev)
9158{
9159 struct tg3 *tp = netdev_priv(dev);
9160 return tp->msg_enable;
9161}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009162
Linus Torvalds1da177e2005-04-16 15:20:36 -07009163static void tg3_set_msglevel(struct net_device *dev, u32 value)
9164{
9165 struct tg3 *tp = netdev_priv(dev);
9166 tp->msg_enable = value;
9167}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009168
Linus Torvalds1da177e2005-04-16 15:20:36 -07009169static int tg3_set_tso(struct net_device *dev, u32 value)
9170{
9171 struct tg3 *tp = netdev_priv(dev);
9172
9173 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9174 if (value)
9175 return -EINVAL;
9176 return 0;
9177 }
Michael Chanb5d37722006-09-27 16:06:21 -07009178 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9179 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009180 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009181 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9183 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9184 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009186 dev->features |= NETIF_F_TSO_ECN;
9187 } else
9188 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009189 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009190 return ethtool_op_set_tso(dev, value);
9191}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009192
Linus Torvalds1da177e2005-04-16 15:20:36 -07009193static int tg3_nway_reset(struct net_device *dev)
9194{
9195 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009196 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009197
Linus Torvalds1da177e2005-04-16 15:20:36 -07009198 if (!netif_running(dev))
9199 return -EAGAIN;
9200
Michael Chanc94e3942005-09-27 12:12:42 -07009201 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9202 return -EINVAL;
9203
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009204 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9205 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9206 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009207 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009208 } else {
9209 u32 bmcr;
9210
9211 spin_lock_bh(&tp->lock);
9212 r = -EINVAL;
9213 tg3_readphy(tp, MII_BMCR, &bmcr);
9214 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9215 ((bmcr & BMCR_ANENABLE) ||
9216 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9217 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9218 BMCR_ANENABLE);
9219 r = 0;
9220 }
9221 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009222 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009223
Linus Torvalds1da177e2005-04-16 15:20:36 -07009224 return r;
9225}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009226
Linus Torvalds1da177e2005-04-16 15:20:36 -07009227static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9228{
9229 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009230
Linus Torvalds1da177e2005-04-16 15:20:36 -07009231 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9232 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009233 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9234 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9235 else
9236 ering->rx_jumbo_max_pending = 0;
9237
9238 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009239
9240 ering->rx_pending = tp->rx_pending;
9241 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009242 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9243 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9244 else
9245 ering->rx_jumbo_pending = 0;
9246
Linus Torvalds1da177e2005-04-16 15:20:36 -07009247 ering->tx_pending = tp->tx_pending;
9248}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009249
Linus Torvalds1da177e2005-04-16 15:20:36 -07009250static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9251{
9252 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009253 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009254
Linus Torvalds1da177e2005-04-16 15:20:36 -07009255 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9256 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009257 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9258 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009259 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009260 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009261 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009262
Michael Chanbbe832c2005-06-24 20:20:04 -07009263 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009264 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009265 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009266 irq_sync = 1;
9267 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009268
Michael Chanbbe832c2005-06-24 20:20:04 -07009269 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009270
Linus Torvalds1da177e2005-04-16 15:20:36 -07009271 tp->rx_pending = ering->rx_pending;
9272
9273 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9274 tp->rx_pending > 63)
9275 tp->rx_pending = 63;
9276 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9277 tp->tx_pending = ering->tx_pending;
9278
9279 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009280 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009281 err = tg3_restart_hw(tp, 1);
9282 if (!err)
9283 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009284 }
9285
David S. Millerf47c11e2005-06-24 20:18:35 -07009286 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009287
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009288 if (irq_sync && !err)
9289 tg3_phy_start(tp);
9290
Michael Chanb9ec6c12006-07-25 16:37:27 -07009291 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009292}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009293
Linus Torvalds1da177e2005-04-16 15:20:36 -07009294static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9295{
9296 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009297
Linus Torvalds1da177e2005-04-16 15:20:36 -07009298 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009299
9300 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9301 epause->rx_pause = 1;
9302 else
9303 epause->rx_pause = 0;
9304
9305 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9306 epause->tx_pause = 1;
9307 else
9308 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009309}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009310
Linus Torvalds1da177e2005-04-16 15:20:36 -07009311static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9312{
9313 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009314 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009315
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009316 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9317 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9318 return -EAGAIN;
9319
9320 if (epause->autoneg) {
9321 u32 newadv;
9322 struct phy_device *phydev;
9323
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07009324 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009325
9326 if (epause->rx_pause) {
9327 if (epause->tx_pause)
9328 newadv = ADVERTISED_Pause;
9329 else
9330 newadv = ADVERTISED_Pause |
9331 ADVERTISED_Asym_Pause;
9332 } else if (epause->tx_pause) {
9333 newadv = ADVERTISED_Asym_Pause;
9334 } else
9335 newadv = 0;
9336
9337 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9338 u32 oldadv = phydev->advertising &
9339 (ADVERTISED_Pause |
9340 ADVERTISED_Asym_Pause);
9341 if (oldadv != newadv) {
9342 phydev->advertising &=
9343 ~(ADVERTISED_Pause |
9344 ADVERTISED_Asym_Pause);
9345 phydev->advertising |= newadv;
9346 err = phy_start_aneg(phydev);
9347 }
9348 } else {
9349 tp->link_config.advertising &=
9350 ~(ADVERTISED_Pause |
9351 ADVERTISED_Asym_Pause);
9352 tp->link_config.advertising |= newadv;
9353 }
9354 } else {
9355 if (epause->rx_pause)
9356 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9357 else
9358 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9359
9360 if (epause->tx_pause)
9361 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9362 else
9363 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9364
9365 if (netif_running(dev))
9366 tg3_setup_flow_control(tp, 0, 0);
9367 }
9368 } else {
9369 int irq_sync = 0;
9370
9371 if (netif_running(dev)) {
9372 tg3_netif_stop(tp);
9373 irq_sync = 1;
9374 }
9375
9376 tg3_full_lock(tp, irq_sync);
9377
9378 if (epause->autoneg)
9379 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9380 else
9381 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9382 if (epause->rx_pause)
9383 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9384 else
9385 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9386 if (epause->tx_pause)
9387 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9388 else
9389 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9390
9391 if (netif_running(dev)) {
9392 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9393 err = tg3_restart_hw(tp, 1);
9394 if (!err)
9395 tg3_netif_start(tp);
9396 }
9397
9398 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009399 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009400
Michael Chanb9ec6c12006-07-25 16:37:27 -07009401 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009402}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009403
Linus Torvalds1da177e2005-04-16 15:20:36 -07009404static u32 tg3_get_rx_csum(struct net_device *dev)
9405{
9406 struct tg3 *tp = netdev_priv(dev);
9407 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9408}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009409
Linus Torvalds1da177e2005-04-16 15:20:36 -07009410static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9411{
9412 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009413
Linus Torvalds1da177e2005-04-16 15:20:36 -07009414 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9415 if (data != 0)
9416 return -EINVAL;
9417 return 0;
9418 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009419
David S. Millerf47c11e2005-06-24 20:18:35 -07009420 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009421 if (data)
9422 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9423 else
9424 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009425 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009426
Linus Torvalds1da177e2005-04-16 15:20:36 -07009427 return 0;
9428}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009429
Linus Torvalds1da177e2005-04-16 15:20:36 -07009430static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9431{
9432 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009433
Linus Torvalds1da177e2005-04-16 15:20:36 -07009434 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9435 if (data != 0)
9436 return -EINVAL;
9437 return 0;
9438 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009439
Michael Chanaf36e6b2006-03-23 01:28:06 -08009440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009445 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009446 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009447 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009448
9449 return 0;
9450}
9451
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009452static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009453{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009454 switch (sset) {
9455 case ETH_SS_TEST:
9456 return TG3_NUM_TEST;
9457 case ETH_SS_STATS:
9458 return TG3_NUM_STATS;
9459 default:
9460 return -EOPNOTSUPP;
9461 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009462}
9463
Linus Torvalds1da177e2005-04-16 15:20:36 -07009464static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9465{
9466 switch (stringset) {
9467 case ETH_SS_STATS:
9468 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9469 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009470 case ETH_SS_TEST:
9471 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9472 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009473 default:
9474 WARN_ON(1); /* we need a WARN() */
9475 break;
9476 }
9477}
9478
Michael Chan4009a932005-09-05 17:52:54 -07009479static int tg3_phys_id(struct net_device *dev, u32 data)
9480{
9481 struct tg3 *tp = netdev_priv(dev);
9482 int i;
9483
9484 if (!netif_running(tp->dev))
9485 return -EAGAIN;
9486
9487 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009488 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009489
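	/* Blink for roughly 'data' seconds: each pass through the loop lasts
	 * about half a second, alternating between forcing all speed/traffic
	 * LEDs on and overriding them back off; the normal LED control value
	 * is restored once the loop ends.
	 */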
9490 for (i = 0; i < (data * 2); i++) {
9491 if ((i % 2) == 0)
9492 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9493 LED_CTRL_1000MBPS_ON |
9494 LED_CTRL_100MBPS_ON |
9495 LED_CTRL_10MBPS_ON |
9496 LED_CTRL_TRAFFIC_OVERRIDE |
9497 LED_CTRL_TRAFFIC_BLINK |
9498 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009499
Michael Chan4009a932005-09-05 17:52:54 -07009500 else
9501 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9502 LED_CTRL_TRAFFIC_OVERRIDE);
9503
9504 if (msleep_interruptible(500))
9505 break;
9506 }
9507 tw32(MAC_LED_CTRL, tp->led_ctrl);
9508 return 0;
9509}
9510
Linus Torvalds1da177e2005-04-16 15:20:36 -07009511static void tg3_get_ethtool_stats (struct net_device *dev,
9512 struct ethtool_stats *estats, u64 *tmp_stats)
9513{
9514 struct tg3 *tp = netdev_priv(dev);
9515 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9516}
9517
Michael Chan566f86a2005-05-29 14:56:58 -07009518#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009519#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9520#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9521#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009522#define NVRAM_SELFBOOT_HW_SIZE 0x20
9523#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009524
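/* NVRAM self test: read the magic word to identify the image format
 * (legacy, selfboot format 1 rev 0/2/3, or hardware selfboot), pull the
 * whole image into memory, then verify the format-specific checksum or
 * parity bits.
 */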
9525static int tg3_test_nvram(struct tg3 *tp)
9526{
Al Virob9fc7dc2007-12-17 22:59:57 -08009527 u32 csum, magic;
9528 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009529 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009530
Michael Chan18201802006-03-20 22:29:15 -08009531 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009532 return -EIO;
9533
Michael Chan1b277772006-03-20 22:27:48 -08009534 if (magic == TG3_EEPROM_MAGIC)
9535 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009536 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009537 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9538 TG3_EEPROM_SB_FORMAT_1) {
9539 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9540 case TG3_EEPROM_SB_REVISION_0:
9541 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9542 break;
9543 case TG3_EEPROM_SB_REVISION_2:
9544 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9545 break;
9546 case TG3_EEPROM_SB_REVISION_3:
9547 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9548 break;
9549 default:
9550 return 0;
9551 }
9552 } else
Michael Chan1b277772006-03-20 22:27:48 -08009553 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009554 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9555 size = NVRAM_SELFBOOT_HW_SIZE;
9556 else
Michael Chan1b277772006-03-20 22:27:48 -08009557 return -EIO;
9558
9559 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009560 if (buf == NULL)
9561 return -ENOMEM;
9562
Michael Chan1b277772006-03-20 22:27:48 -08009563 err = -EIO;
9564 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009565 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009566 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009567 }
Michael Chan1b277772006-03-20 22:27:48 -08009568 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009569 goto out;
9570
Michael Chan1b277772006-03-20 22:27:48 -08009571 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009572 magic = swab32(le32_to_cpu(buf[0]));
9573 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009574 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009575 u8 *buf8 = (u8 *) buf, csum8 = 0;
9576
Al Virob9fc7dc2007-12-17 22:59:57 -08009577 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009578 TG3_EEPROM_SB_REVISION_2) {
9579 /* For rev 2, the csum doesn't include the MBA. */
9580 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9581 csum8 += buf8[i];
9582 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9583 csum8 += buf8[i];
9584 } else {
9585 for (i = 0; i < size; i++)
9586 csum8 += buf8[i];
9587 }
Michael Chan1b277772006-03-20 22:27:48 -08009588
Adrian Bunkad96b482006-04-05 22:21:04 -07009589 if (csum8 == 0) {
9590 err = 0;
9591 goto out;
9592 }
9593
9594 err = -EIO;
9595 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009596 }
Michael Chan566f86a2005-05-29 14:56:58 -07009597
Al Virob9fc7dc2007-12-17 22:59:57 -08009598 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009599 TG3_EEPROM_MAGIC_HW) {
9600 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9601 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9602 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009603
9604 /* Separate the parity bits and the data bytes. */
9605 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9606 if ((i == 0) || (i == 8)) {
9607 int l;
9608 u8 msk;
9609
9610 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9611 parity[k++] = buf8[i] & msk;
9612 i++;
9613 }
9614 else if (i == 16) {
9615 int l;
9616 u8 msk;
9617
9618 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9619 parity[k++] = buf8[i] & msk;
9620 i++;
9621
9622 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9623 parity[k++] = buf8[i] & msk;
9624 i++;
9625 }
9626 data[j++] = buf8[i];
9627 }
9628
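		/* Verify odd parity: the stored parity bit must be set exactly
		 * when the corresponding data byte has an even number of one
		 * bits, so byte plus parity always carry an odd bit count.
		 */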
9629 err = -EIO;
9630 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9631 u8 hw8 = hweight8(data[i]);
9632
9633 if ((hw8 & 0x1) && parity[i])
9634 goto out;
9635 else if (!(hw8 & 0x1) && !parity[i])
9636 goto out;
9637 }
9638 err = 0;
9639 goto out;
9640 }
9641
Michael Chan566f86a2005-05-29 14:56:58 -07009642 /* Bootstrap checksum at offset 0x10 */
9643 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009644	if (csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009645 goto out;
9646
9647 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9648 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009649 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009650 goto out;
9651
9652 err = 0;
9653
9654out:
9655 kfree(buf);
9656 return err;
9657}
9658
Michael Chanca430072005-05-29 14:57:23 -07009659#define TG3_SERDES_TIMEOUT_SEC 2
9660#define TG3_COPPER_TIMEOUT_SEC 6
9661
9662static int tg3_test_link(struct tg3 *tp)
9663{
9664 int i, max;
9665
9666 if (!netif_running(tp->dev))
9667 return -ENODEV;
9668
Michael Chan4c987482005-09-05 17:52:38 -07009669 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009670 max = TG3_SERDES_TIMEOUT_SEC;
9671 else
9672 max = TG3_COPPER_TIMEOUT_SEC;
9673
9674 for (i = 0; i < max; i++) {
9675 if (netif_carrier_ok(tp->dev))
9676 return 0;
9677
9678 if (msleep_interruptible(1000))
9679 break;
9680 }
9681
9682 return -EIO;
9683}
9684
Michael Chana71116d2005-05-29 14:58:11 -07009685/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009686static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009687{
Michael Chanb16250e2006-09-27 16:10:14 -07009688 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009689 u32 offset, read_mask, write_mask, val, save_val, read_val;
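	/* For each entry: read_mask covers read-only bits that must keep
	 * their value across writes, write_mask covers bits that must accept
	 * both all-zeros and all-ones; the flags restrict an entry to
	 * particular chip families (5705-class, 5750-class, 5788).
	 */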
9690 static struct {
9691 u16 offset;
9692 u16 flags;
9693#define TG3_FL_5705 0x1
9694#define TG3_FL_NOT_5705 0x2
9695#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009696#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009697 u32 read_mask;
9698 u32 write_mask;
9699 } reg_tbl[] = {
9700 /* MAC Control Registers */
9701 { MAC_MODE, TG3_FL_NOT_5705,
9702 0x00000000, 0x00ef6f8c },
9703 { MAC_MODE, TG3_FL_5705,
9704 0x00000000, 0x01ef6b8c },
9705 { MAC_STATUS, TG3_FL_NOT_5705,
9706 0x03800107, 0x00000000 },
9707 { MAC_STATUS, TG3_FL_5705,
9708 0x03800100, 0x00000000 },
9709 { MAC_ADDR_0_HIGH, 0x0000,
9710 0x00000000, 0x0000ffff },
9711 { MAC_ADDR_0_LOW, 0x0000,
9712 0x00000000, 0xffffffff },
9713 { MAC_RX_MTU_SIZE, 0x0000,
9714 0x00000000, 0x0000ffff },
9715 { MAC_TX_MODE, 0x0000,
9716 0x00000000, 0x00000070 },
9717 { MAC_TX_LENGTHS, 0x0000,
9718 0x00000000, 0x00003fff },
9719 { MAC_RX_MODE, TG3_FL_NOT_5705,
9720 0x00000000, 0x000007fc },
9721 { MAC_RX_MODE, TG3_FL_5705,
9722 0x00000000, 0x000007dc },
9723 { MAC_HASH_REG_0, 0x0000,
9724 0x00000000, 0xffffffff },
9725 { MAC_HASH_REG_1, 0x0000,
9726 0x00000000, 0xffffffff },
9727 { MAC_HASH_REG_2, 0x0000,
9728 0x00000000, 0xffffffff },
9729 { MAC_HASH_REG_3, 0x0000,
9730 0x00000000, 0xffffffff },
9731
9732 /* Receive Data and Receive BD Initiator Control Registers. */
9733 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9734 0x00000000, 0xffffffff },
9735 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9736 0x00000000, 0xffffffff },
9737 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9738 0x00000000, 0x00000003 },
9739 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9740 0x00000000, 0xffffffff },
9741 { RCVDBDI_STD_BD+0, 0x0000,
9742 0x00000000, 0xffffffff },
9743 { RCVDBDI_STD_BD+4, 0x0000,
9744 0x00000000, 0xffffffff },
9745 { RCVDBDI_STD_BD+8, 0x0000,
9746 0x00000000, 0xffff0002 },
9747 { RCVDBDI_STD_BD+0xc, 0x0000,
9748 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009749
Michael Chana71116d2005-05-29 14:58:11 -07009750 /* Receive BD Initiator Control Registers. */
9751 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9752 0x00000000, 0xffffffff },
9753 { RCVBDI_STD_THRESH, TG3_FL_5705,
9754 0x00000000, 0x000003ff },
9755 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9756 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009757
Michael Chana71116d2005-05-29 14:58:11 -07009758 /* Host Coalescing Control Registers. */
9759 { HOSTCC_MODE, TG3_FL_NOT_5705,
9760 0x00000000, 0x00000004 },
9761 { HOSTCC_MODE, TG3_FL_5705,
9762 0x00000000, 0x000000f6 },
9763 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9764 0x00000000, 0xffffffff },
9765 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9766 0x00000000, 0x000003ff },
9767 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9768 0x00000000, 0xffffffff },
9769 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9770 0x00000000, 0x000003ff },
9771 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9772 0x00000000, 0xffffffff },
9773 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9774 0x00000000, 0x000000ff },
9775 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9776 0x00000000, 0xffffffff },
9777 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9778 0x00000000, 0x000000ff },
9779 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9780 0x00000000, 0xffffffff },
9781 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9782 0x00000000, 0xffffffff },
9783 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9784 0x00000000, 0xffffffff },
9785 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9786 0x00000000, 0x000000ff },
9787 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9788 0x00000000, 0xffffffff },
9789 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9790 0x00000000, 0x000000ff },
9791 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9792 0x00000000, 0xffffffff },
9793 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9794 0x00000000, 0xffffffff },
9795 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9796 0x00000000, 0xffffffff },
9797 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9798 0x00000000, 0xffffffff },
9799 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9800 0x00000000, 0xffffffff },
9801 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9802 0xffffffff, 0x00000000 },
9803 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9804 0xffffffff, 0x00000000 },
9805
9806 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009807 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009808 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009809 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009810 0x00000000, 0x007fffff },
9811 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9812 0x00000000, 0x0000003f },
9813 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9814 0x00000000, 0x000001ff },
9815 { BUFMGR_MB_HIGH_WATER, 0x0000,
9816 0x00000000, 0x000001ff },
9817 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9818 0xffffffff, 0x00000000 },
9819 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9820 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009821
Michael Chana71116d2005-05-29 14:58:11 -07009822 /* Mailbox Registers */
9823 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9824 0x00000000, 0x000001ff },
9825 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9826 0x00000000, 0x000001ff },
9827 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9828 0x00000000, 0x000007ff },
9829 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9830 0x00000000, 0x000001ff },
9831
9832 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9833 };
9834
Michael Chanb16250e2006-09-27 16:10:14 -07009835 is_5705 = is_5750 = 0;
9836 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009837 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009838 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9839 is_5750 = 1;
9840 }
Michael Chana71116d2005-05-29 14:58:11 -07009841
9842 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9843 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9844 continue;
9845
9846 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9847 continue;
9848
9849 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9850 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9851 continue;
9852
Michael Chanb16250e2006-09-27 16:10:14 -07009853 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9854 continue;
9855
Michael Chana71116d2005-05-29 14:58:11 -07009856 offset = (u32) reg_tbl[i].offset;
9857 read_mask = reg_tbl[i].read_mask;
9858 write_mask = reg_tbl[i].write_mask;
9859
9860 /* Save the original register content */
9861 save_val = tr32(offset);
9862
9863 /* Determine the read-only value. */
9864 read_val = save_val & read_mask;
9865
9866 /* Write zero to the register, then make sure the read-only bits
9867 * are not changed and the read/write bits are all zeros.
9868 */
9869 tw32(offset, 0);
9870
9871 val = tr32(offset);
9872
9873 /* Test the read-only and read/write bits. */
9874 if (((val & read_mask) != read_val) || (val & write_mask))
9875 goto out;
9876
9877 /* Write ones to all the bits defined by RdMask and WrMask, then
9878 * make sure the read-only bits are not changed and the
9879 * read/write bits are all ones.
9880 */
9881 tw32(offset, read_mask | write_mask);
9882
9883 val = tr32(offset);
9884
9885 /* Test the read-only bits. */
9886 if ((val & read_mask) != read_val)
9887 goto out;
9888
9889 /* Test the read/write bits. */
9890 if ((val & write_mask) != write_mask)
9891 goto out;
9892
9893 tw32(offset, save_val);
9894 }
9895
9896 return 0;
9897
9898out:
Michael Chan9f88f292006-12-07 00:22:54 -08009899 if (netif_msg_hw(tp))
9900 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9901 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009902 tw32(offset, save_val);
9903 return -EIO;
9904}
9905
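/* Step through the given SRAM region in 4-byte words, writing each test
 * pattern through the memory window and reading it straight back; any
 * mismatch fails the memory test.
 */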
Michael Chan7942e1d2005-05-29 14:58:36 -07009906static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9907{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009908 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009909 int i;
9910 u32 j;
9911
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009912 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009913 for (j = 0; j < len; j += 4) {
9914 u32 val;
9915
9916 tg3_write_mem(tp, offset + j, test_pattern[i]);
9917 tg3_read_mem(tp, offset + j, &val);
9918 if (val != test_pattern[i])
9919 return -EIO;
9920 }
9921 }
9922 return 0;
9923}
9924
9925static int tg3_test_memory(struct tg3 *tp)
9926{
9927 static struct mem_entry {
9928 u32 offset;
9929 u32 len;
9930 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009931 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009932 { 0x00002000, 0x1c000},
9933 { 0xffffffff, 0x00000}
9934 }, mem_tbl_5705[] = {
9935 { 0x00000100, 0x0000c},
9936 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009937 { 0x00004000, 0x00800},
9938 { 0x00006000, 0x01000},
9939 { 0x00008000, 0x02000},
9940 { 0x00010000, 0x0e000},
9941 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009942 }, mem_tbl_5755[] = {
9943 { 0x00000200, 0x00008},
9944 { 0x00004000, 0x00800},
9945 { 0x00006000, 0x00800},
9946 { 0x00008000, 0x02000},
9947 { 0x00010000, 0x0c000},
9948 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009949 }, mem_tbl_5906[] = {
9950 { 0x00000200, 0x00008},
9951 { 0x00004000, 0x00400},
9952 { 0x00006000, 0x00400},
9953 { 0x00008000, 0x01000},
9954 { 0x00010000, 0x01000},
9955 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009956 };
9957 struct mem_entry *mem_tbl;
9958 int err = 0;
9959 int i;
9960
Michael Chan79f4d132006-03-20 22:28:57 -08009961 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08009962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009963 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -08009967 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -07009968 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9969 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -08009970 else
9971 mem_tbl = mem_tbl_5705;
9972 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07009973 mem_tbl = mem_tbl_570x;
9974
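	/* Exercise every SRAM region listed for this chip, up to the
	 * 0xffffffff sentinel entry.
	 */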
9975 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9976 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9977 mem_tbl[i].len)) != 0)
9978 break;
9979 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009980
Michael Chan7942e1d2005-05-29 14:58:36 -07009981 return err;
9982}
9983
Michael Chan9f40dea2005-09-05 17:53:06 -07009984#define TG3_MAC_LOOPBACK 0
9985#define TG3_PHY_LOOPBACK 1
9986
9987static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07009988{
Michael Chan9f40dea2005-09-05 17:53:06 -07009989 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07009990 u32 desc_idx;
9991 struct sk_buff *skb, *rx_skb;
9992 u8 *tx_data;
9993 dma_addr_t map;
9994 int num_pkts, tx_len, rx_len, i, err;
9995 struct tg3_rx_buffer_desc *desc;
9996
Michael Chan9f40dea2005-09-05 17:53:06 -07009997 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07009998 /* HW errata - mac loopback fails in some cases on 5780.
9999 * Normal traffic and PHY loopback are not affected by
 10000		 * this erratum.

10001 */
10002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10003 return 0;
10004
Michael Chan9f40dea2005-09-05 17:53:06 -070010005 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010006 MAC_MODE_PORT_INT_LPBACK;
10007 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10008 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -070010009 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10010 mac_mode |= MAC_MODE_PORT_MODE_MII;
10011 else
10012 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -070010013 tw32(MAC_MODE, mac_mode);
10014 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -070010015 u32 val;
10016
Michael Chanb16250e2006-09-27 16:10:14 -070010017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10018 u32 phytest;
10019
10020 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10021 u32 phy;
10022
10023 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10024 phytest | MII_TG3_EPHY_SHADOW_EN);
10025 if (!tg3_readphy(tp, 0x1b, &phy))
10026 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -070010027 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10028 }
Michael Chan5d64ad32006-12-07 00:19:40 -080010029 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10030 } else
10031 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -070010032
Matt Carlson9ef8ca92007-07-11 19:48:29 -070010033 tg3_phy_toggle_automdix(tp, 0);
10034
Michael Chan3f7045c2006-09-27 16:02:29 -070010035 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -070010036 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -080010037
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010038 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -080010039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -070010040 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -080010041 mac_mode |= MAC_MODE_PORT_MODE_MII;
10042 } else
10043 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -070010044
Michael Chanc94e3942005-09-27 12:12:42 -070010045	/* Reset to avoid intermittently losing the first rx packet. */
10046 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10047 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10048 udelay(10);
10049 tw32_f(MAC_RX_MODE, tp->rx_mode);
10050 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -070010051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10052 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10053 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10054 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10055 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -080010056 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10057 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10058 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010059 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010060 }
10061 else
10062 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010063
10064 err = -EIO;
10065
Michael Chanc76949a2005-05-29 14:58:59 -070010066 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010067 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010068 if (!skb)
10069 return -ENOMEM;
10070
Michael Chanc76949a2005-05-29 14:58:59 -070010071 tx_data = skb_put(skb, tx_len);
10072 memcpy(tx_data, tp->dev->dev_addr, 6);
10073 memset(tx_data + 6, 0x0, 8);
10074
10075 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10076
10077 for (i = 14; i < tx_len; i++)
10078 tx_data[i] = (u8) (i & 0xff);
10079
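	/* The test frame is addressed to our own MAC and padded with an
	 * incrementing byte pattern; the receive side below verifies that
	 * the same pattern comes back.
	 */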
10080 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10081
10082 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10083 HOSTCC_MODE_NOW);
10084
10085 udelay(10);
10086
10087 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10088
Michael Chanc76949a2005-05-29 14:58:59 -070010089 num_pkts = 0;
10090
Michael Chan9f40dea2005-09-05 17:53:06 -070010091 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010092
Michael Chan9f40dea2005-09-05 17:53:06 -070010093 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010094 num_pkts++;
10095
Michael Chan9f40dea2005-09-05 17:53:06 -070010096 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10097 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010098 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010099
10100 udelay(10);
10101
Michael Chan3f7045c2006-09-27 16:02:29 -070010102 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10103 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010104 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10105 HOSTCC_MODE_NOW);
10106
10107 udelay(10);
10108
10109 tx_idx = tp->hw_status->idx[0].tx_consumer;
10110 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010111 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010112 (rx_idx == (rx_start_idx + num_pkts)))
10113 break;
10114 }
10115
10116 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10117 dev_kfree_skb(skb);
10118
Michael Chan9f40dea2005-09-05 17:53:06 -070010119 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010120 goto out;
10121
10122 if (rx_idx != rx_start_idx + num_pkts)
10123 goto out;
10124
10125 desc = &tp->rx_rcb[rx_start_idx];
10126 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10127 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10128 if (opaque_key != RXD_OPAQUE_RING_STD)
10129 goto out;
10130
10131 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10132 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10133 goto out;
10134
10135 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10136 if (rx_len != tx_len)
10137 goto out;
10138
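	/* rx_len above drops the trailing 4 bytes (the MAC-appended frame
	 * check sequence), so a successful loopback hands back exactly
	 * tx_len bytes of our pattern.
	 */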
10139 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10140
10141 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10142 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10143
10144 for (i = 14; i < tx_len; i++) {
10145 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10146 goto out;
10147 }
10148 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010149
Michael Chanc76949a2005-05-29 14:58:59 -070010150 /* tg3_free_rings will unmap and free the rx_skb */
10151out:
10152 return err;
10153}
10154
Michael Chan9f40dea2005-09-05 17:53:06 -070010155#define TG3_MAC_LOOPBACK_FAILED 1
10156#define TG3_PHY_LOOPBACK_FAILED 2
10157#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10158 TG3_PHY_LOOPBACK_FAILED)
10159
10160static int tg3_test_loopback(struct tg3 *tp)
10161{
10162 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010163 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010164
10165 if (!netif_running(tp->dev))
10166 return TG3_LOOPBACK_FAILED;
10167
Michael Chanb9ec6c12006-07-25 16:37:27 -070010168 err = tg3_reset_hw(tp, 1);
10169 if (err)
10170 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010171
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010175 int i;
10176 u32 status;
10177
10178 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10179
10180 /* Wait for up to 40 microseconds to acquire lock. */
10181 for (i = 0; i < 4; i++) {
10182 status = tr32(TG3_CPMU_MUTEX_GNT);
10183 if (status == CPMU_MUTEX_GNT_DRIVER)
10184 break;
10185 udelay(10);
10186 }
10187
10188 if (status != CPMU_MUTEX_GNT_DRIVER)
10189 return TG3_LOOPBACK_FAILED;
10190
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010191 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010192 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010193 tw32(TG3_CPMU_CTRL,
10194 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10195 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010196 }
10197
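	/* Run MAC-internal loopback while the CPMU link-aware/link-speed
	 * clock modes are disabled (serialized by the hardware mutex taken
	 * above); the CPMU settings are restored before the PHY loopback
	 * test further down.
	 */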
Michael Chan9f40dea2005-09-05 17:53:06 -070010198 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10199 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010200
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010204 tw32(TG3_CPMU_CTRL, cpmuctrl);
10205
10206 /* Release the mutex */
10207 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10208 }
10209
Matt Carlsondd477002008-05-25 23:45:58 -070010210 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10211 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010212 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10213 err |= TG3_PHY_LOOPBACK_FAILED;
10214 }
10215
10216 return err;
10217}
10218
Michael Chan4cafd3f2005-05-29 14:56:34 -070010219static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10220 u64 *data)
10221{
Michael Chan566f86a2005-05-29 14:56:58 -070010222 struct tg3 *tp = netdev_priv(dev);
10223
Michael Chanbc1c7562006-03-20 17:48:03 -080010224 if (tp->link_config.phy_is_low_power)
10225 tg3_set_power_state(tp, PCI_D0);
10226
Michael Chan566f86a2005-05-29 14:56:58 -070010227 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10228
10229 if (tg3_test_nvram(tp) != 0) {
10230 etest->flags |= ETH_TEST_FL_FAILED;
10231 data[0] = 1;
10232 }
Michael Chanca430072005-05-29 14:57:23 -070010233 if (tg3_test_link(tp) != 0) {
10234 etest->flags |= ETH_TEST_FL_FAILED;
10235 data[1] = 1;
10236 }
Michael Chana71116d2005-05-29 14:58:11 -070010237 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010238 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010239
Michael Chanbbe832c2005-06-24 20:20:04 -070010240 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010241 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010242 tg3_netif_stop(tp);
10243 irq_sync = 1;
10244 }
10245
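		/* Offline testing is disruptive: the chip and its on-board
		 * RX/TX CPUs are halted, the register, memory and loopback
		 * tests run against the quiesced device, and the hardware is
		 * fully reset and reinitialized afterwards.
		 */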
10246 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010247
10248 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010249 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010250 tg3_halt_cpu(tp, RX_CPU_BASE);
10251 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10252 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010253 if (!err)
10254 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010255
Michael Chand9ab5ad2006-03-20 22:27:35 -080010256 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10257 tg3_phy_reset(tp);
10258
Michael Chana71116d2005-05-29 14:58:11 -070010259 if (tg3_test_registers(tp) != 0) {
10260 etest->flags |= ETH_TEST_FL_FAILED;
10261 data[2] = 1;
10262 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010263 if (tg3_test_memory(tp) != 0) {
10264 etest->flags |= ETH_TEST_FL_FAILED;
10265 data[3] = 1;
10266 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010267 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010268 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010269
David S. Millerf47c11e2005-06-24 20:18:35 -070010270 tg3_full_unlock(tp);
10271
Michael Chand4bc3922005-05-29 14:59:20 -070010272 if (tg3_test_interrupt(tp) != 0) {
10273 etest->flags |= ETH_TEST_FL_FAILED;
10274 data[5] = 1;
10275 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010276
10277 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010278
Michael Chana71116d2005-05-29 14:58:11 -070010279 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10280 if (netif_running(dev)) {
10281 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010282 err2 = tg3_restart_hw(tp, 1);
10283 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010284 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010285 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010286
10287 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010288
10289 if (irq_sync && !err2)
10290 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010291 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010292 if (tp->link_config.phy_is_low_power)
10293 tg3_set_power_state(tp, PCI_D3hot);
10294
Michael Chan4cafd3f2005-05-29 14:56:34 -070010295}
10296
Linus Torvalds1da177e2005-04-16 15:20:36 -070010297static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10298{
10299 struct mii_ioctl_data *data = if_mii(ifr);
10300 struct tg3 *tp = netdev_priv(dev);
10301 int err;
10302
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010303 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10304 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10305 return -EAGAIN;
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -070010306 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010307 }
10308
Linus Torvalds1da177e2005-04-16 15:20:36 -070010309 switch(cmd) {
10310 case SIOCGMIIPHY:
10311 data->phy_id = PHY_ADDR;
10312
10313 /* fallthru */
10314 case SIOCGMIIREG: {
10315 u32 mii_regval;
10316
10317 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10318 break; /* We have no PHY */
10319
Michael Chanbc1c7562006-03-20 17:48:03 -080010320 if (tp->link_config.phy_is_low_power)
10321 return -EAGAIN;
10322
David S. Millerf47c11e2005-06-24 20:18:35 -070010323 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010324 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010325 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010326
10327 data->val_out = mii_regval;
10328
10329 return err;
10330 }
10331
10332 case SIOCSMIIREG:
10333 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10334 break; /* We have no PHY */
10335
10336 if (!capable(CAP_NET_ADMIN))
10337 return -EPERM;
10338
Michael Chanbc1c7562006-03-20 17:48:03 -080010339 if (tp->link_config.phy_is_low_power)
10340 return -EAGAIN;
10341
David S. Millerf47c11e2005-06-24 20:18:35 -070010342 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010343 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010344 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010345
10346 return err;
10347
10348 default:
10349 /* do nothing */
10350 break;
10351 }
10352 return -EOPNOTSUPP;
10353}
10354
10355#if TG3_VLAN_TAG_USED
10356static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10357{
10358 struct tg3 *tp = netdev_priv(dev);
10359
Michael Chan29315e82006-06-29 20:12:30 -070010360 if (netif_running(dev))
10361 tg3_netif_stop(tp);
10362
David S. Millerf47c11e2005-06-24 20:18:35 -070010363 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010364
10365 tp->vlgrp = grp;
10366
10367 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10368 __tg3_set_rx_mode(dev);
10369
Michael Chan29315e82006-06-29 20:12:30 -070010370 if (netif_running(dev))
10371 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010372
10373 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010374}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010375#endif
10376
David S. Miller15f98502005-05-18 22:49:26 -070010377static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10378{
10379 struct tg3 *tp = netdev_priv(dev);
10380
10381 memcpy(ec, &tp->coal, sizeof(*ec));
10382 return 0;
10383}
10384
Michael Chand244c892005-07-05 14:42:33 -070010385static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10386{
10387 struct tg3 *tp = netdev_priv(dev);
10388 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10389 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10390
10391 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10392 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10393 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10394 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10395 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10396 }
10397
10398 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10399 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10400 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10401 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10402 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10403 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10404 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10405 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10406 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10407 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10408 return -EINVAL;
10409
10410 /* No rx interrupts will be generated if both are zero */
10411 if ((ec->rx_coalesce_usecs == 0) &&
10412 (ec->rx_max_coalesced_frames == 0))
10413 return -EINVAL;
10414
10415 /* No tx interrupts will be generated if both are zero */
10416 if ((ec->tx_coalesce_usecs == 0) &&
10417 (ec->tx_max_coalesced_frames == 0))
10418 return -EINVAL;
10419
10420 /* Only copy relevant parameters, ignore all others. */
10421 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10422 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10423 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10424 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10425 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10426 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10427 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10428 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10429 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10430
10431 if (netif_running(dev)) {
10432 tg3_full_lock(tp, 0);
10433 __tg3_set_coalesce(tp, &tp->coal);
10434 tg3_full_unlock(tp);
10435 }
10436 return 0;
10437}
10438
Jeff Garzik7282d492006-09-13 14:30:00 -040010439static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010440 .get_settings = tg3_get_settings,
10441 .set_settings = tg3_set_settings,
10442 .get_drvinfo = tg3_get_drvinfo,
10443 .get_regs_len = tg3_get_regs_len,
10444 .get_regs = tg3_get_regs,
10445 .get_wol = tg3_get_wol,
10446 .set_wol = tg3_set_wol,
10447 .get_msglevel = tg3_get_msglevel,
10448 .set_msglevel = tg3_set_msglevel,
10449 .nway_reset = tg3_nway_reset,
10450 .get_link = ethtool_op_get_link,
10451 .get_eeprom_len = tg3_get_eeprom_len,
10452 .get_eeprom = tg3_get_eeprom,
10453 .set_eeprom = tg3_set_eeprom,
10454 .get_ringparam = tg3_get_ringparam,
10455 .set_ringparam = tg3_set_ringparam,
10456 .get_pauseparam = tg3_get_pauseparam,
10457 .set_pauseparam = tg3_set_pauseparam,
10458 .get_rx_csum = tg3_get_rx_csum,
10459 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010460 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010461 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010462 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010463 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010464 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010465 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010466 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010467 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010468 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010469 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010470};
10471
10472static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10473{
Michael Chan1b277772006-03-20 22:27:48 -080010474 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010475
10476 tp->nvram_size = EEPROM_CHIP_SIZE;
10477
Michael Chan18201802006-03-20 22:29:15 -080010478 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010479 return;
10480
Michael Chanb16250e2006-09-27 16:10:14 -070010481 if ((magic != TG3_EEPROM_MAGIC) &&
10482 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10483 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010484 return;
10485
10486 /*
10487 * Size the chip by reading offsets at increasing powers of two.
10488 * When we encounter our validation signature, we know the addressing
10489 * has wrapped around, and thus have our chip size.
10490 */
Michael Chan1b277772006-03-20 22:27:48 -080010491 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010492
10493 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010494 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010495 return;
10496
Michael Chan18201802006-03-20 22:29:15 -080010497 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010498 break;
10499
10500 cursize <<= 1;
10501 }
10502
10503 tp->nvram_size = cursize;
10504}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010505
Linus Torvalds1da177e2005-04-16 15:20:36 -070010506static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10507{
10508 u32 val;
10509
Michael Chan18201802006-03-20 22:29:15 -080010510 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010511 return;
10512
10513 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010514 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010515 tg3_get_eeprom_size(tp);
10516 return;
10517 }
10518
Linus Torvalds1da177e2005-04-16 15:20:36 -070010519 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10520 if (val != 0) {
10521 tp->nvram_size = (val >> 16) * 1024;
10522 return;
10523 }
10524 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010525 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010526}
10527
10528static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10529{
10530 u32 nvcfg1;
10531
10532 nvcfg1 = tr32(NVRAM_CFG1);
10533 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10534 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10535 }
10536 else {
10537 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10538 tw32(NVRAM_CFG1, nvcfg1);
10539 }
10540
Michael Chan4c987482005-09-05 17:52:38 -070010541 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010542 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010543 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10544 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10545 tp->nvram_jedecnum = JEDEC_ATMEL;
10546 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10547 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10548 break;
10549 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10550 tp->nvram_jedecnum = JEDEC_ATMEL;
10551 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10552 break;
10553 case FLASH_VENDOR_ATMEL_EEPROM:
10554 tp->nvram_jedecnum = JEDEC_ATMEL;
10555 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10556 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10557 break;
10558 case FLASH_VENDOR_ST:
10559 tp->nvram_jedecnum = JEDEC_ST;
10560 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10561 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10562 break;
10563 case FLASH_VENDOR_SAIFUN:
10564 tp->nvram_jedecnum = JEDEC_SAIFUN;
10565 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10566 break;
10567 case FLASH_VENDOR_SST_SMALL:
10568 case FLASH_VENDOR_SST_LARGE:
10569 tp->nvram_jedecnum = JEDEC_SST;
10570 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10571 break;
10572 }
10573 }
10574 else {
10575 tp->nvram_jedecnum = JEDEC_ATMEL;
10576 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10577 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10578 }
10579}
10580
Michael Chan361b4ac2005-04-21 17:11:21 -070010581static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10582{
10583 u32 nvcfg1;
10584
10585 nvcfg1 = tr32(NVRAM_CFG1);
10586
Michael Chane6af3012005-04-21 17:12:05 -070010587 /* NVRAM protection for TPM */
10588 if (nvcfg1 & (1 << 27))
10589 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10590
Michael Chan361b4ac2005-04-21 17:11:21 -070010591 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10592 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10593 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10594 tp->nvram_jedecnum = JEDEC_ATMEL;
10595 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10596 break;
10597 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10598 tp->nvram_jedecnum = JEDEC_ATMEL;
10599 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10600 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10601 break;
10602 case FLASH_5752VENDOR_ST_M45PE10:
10603 case FLASH_5752VENDOR_ST_M45PE20:
10604 case FLASH_5752VENDOR_ST_M45PE40:
10605 tp->nvram_jedecnum = JEDEC_ST;
10606 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10607 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10608 break;
10609 }
10610
10611 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10612 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10613 case FLASH_5752PAGE_SIZE_256:
10614 tp->nvram_pagesize = 256;
10615 break;
10616 case FLASH_5752PAGE_SIZE_512:
10617 tp->nvram_pagesize = 512;
10618 break;
10619 case FLASH_5752PAGE_SIZE_1K:
10620 tp->nvram_pagesize = 1024;
10621 break;
10622 case FLASH_5752PAGE_SIZE_2K:
10623 tp->nvram_pagesize = 2048;
10624 break;
10625 case FLASH_5752PAGE_SIZE_4K:
10626 tp->nvram_pagesize = 4096;
10627 break;
10628 case FLASH_5752PAGE_SIZE_264:
10629 tp->nvram_pagesize = 264;
10630 break;
10631 }
10632 }
10633 else {
10634 /* For eeprom, set pagesize to maximum eeprom size */
10635 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10636
10637 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10638 tw32(NVRAM_CFG1, nvcfg1);
10639 }
10640}
10641
Michael Chand3c7b882006-03-23 01:28:25 -080010642static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10643{
Matt Carlson989a9d22007-05-05 11:51:05 -070010644 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010645
10646 nvcfg1 = tr32(NVRAM_CFG1);
10647
10648 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010649 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010650 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010651 protect = 1;
10652 }
Michael Chand3c7b882006-03-23 01:28:25 -080010653
Matt Carlson989a9d22007-05-05 11:51:05 -070010654 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10655 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010656 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10657 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10658 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010659 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010660 tp->nvram_jedecnum = JEDEC_ATMEL;
10661 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10662 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10663 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010664 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10665 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010666 tp->nvram_size = (protect ? 0x3e200 :
10667 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010668 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010669 tp->nvram_size = (protect ? 0x1f200 :
10670 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010671 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010672 tp->nvram_size = (protect ? 0x1f200 :
10673 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010674 break;
10675 case FLASH_5752VENDOR_ST_M45PE10:
10676 case FLASH_5752VENDOR_ST_M45PE20:
10677 case FLASH_5752VENDOR_ST_M45PE40:
10678 tp->nvram_jedecnum = JEDEC_ST;
10679 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10680 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10681 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010682 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010683 tp->nvram_size = (protect ?
10684 TG3_NVRAM_SIZE_64KB :
10685 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010686 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010687 tp->nvram_size = (protect ?
10688 TG3_NVRAM_SIZE_64KB :
10689 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010690 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010691 tp->nvram_size = (protect ?
10692 TG3_NVRAM_SIZE_128KB :
10693 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010694 break;
10695 }
10696}
10697
Michael Chan1b277772006-03-20 22:27:48 -080010698static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10699{
10700 u32 nvcfg1;
10701
10702 nvcfg1 = tr32(NVRAM_CFG1);
10703
10704 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10705 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10706 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10707 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10708 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10709 tp->nvram_jedecnum = JEDEC_ATMEL;
10710 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10711 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10712
10713 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10714 tw32(NVRAM_CFG1, nvcfg1);
10715 break;
10716 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10717 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10718 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10719 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10720 tp->nvram_jedecnum = JEDEC_ATMEL;
10721 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10722 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10723 tp->nvram_pagesize = 264;
10724 break;
10725 case FLASH_5752VENDOR_ST_M45PE10:
10726 case FLASH_5752VENDOR_ST_M45PE20:
10727 case FLASH_5752VENDOR_ST_M45PE40:
10728 tp->nvram_jedecnum = JEDEC_ST;
10729 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10730 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10731 tp->nvram_pagesize = 256;
10732 break;
10733 }
10734}
10735
Matt Carlson6b91fa02007-10-10 18:01:09 -070010736static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10737{
10738 u32 nvcfg1, protect = 0;
10739
10740 nvcfg1 = tr32(NVRAM_CFG1);
10741
10742 /* NVRAM protection for TPM */
10743 if (nvcfg1 & (1 << 27)) {
10744 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10745 protect = 1;
10746 }
10747
10748 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10749 switch (nvcfg1) {
10750 case FLASH_5761VENDOR_ATMEL_ADB021D:
10751 case FLASH_5761VENDOR_ATMEL_ADB041D:
10752 case FLASH_5761VENDOR_ATMEL_ADB081D:
10753 case FLASH_5761VENDOR_ATMEL_ADB161D:
10754 case FLASH_5761VENDOR_ATMEL_MDB021D:
10755 case FLASH_5761VENDOR_ATMEL_MDB041D:
10756 case FLASH_5761VENDOR_ATMEL_MDB081D:
10757 case FLASH_5761VENDOR_ATMEL_MDB161D:
10758 tp->nvram_jedecnum = JEDEC_ATMEL;
10759 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10760 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10761 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10762 tp->nvram_pagesize = 256;
10763 break;
10764 case FLASH_5761VENDOR_ST_A_M45PE20:
10765 case FLASH_5761VENDOR_ST_A_M45PE40:
10766 case FLASH_5761VENDOR_ST_A_M45PE80:
10767 case FLASH_5761VENDOR_ST_A_M45PE16:
10768 case FLASH_5761VENDOR_ST_M_M45PE20:
10769 case FLASH_5761VENDOR_ST_M_M45PE40:
10770 case FLASH_5761VENDOR_ST_M_M45PE80:
10771 case FLASH_5761VENDOR_ST_M_M45PE16:
10772 tp->nvram_jedecnum = JEDEC_ST;
10773 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10774 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10775 tp->nvram_pagesize = 256;
10776 break;
10777 }
10778
10779 if (protect) {
10780 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10781 } else {
10782 switch (nvcfg1) {
10783 case FLASH_5761VENDOR_ATMEL_ADB161D:
10784 case FLASH_5761VENDOR_ATMEL_MDB161D:
10785 case FLASH_5761VENDOR_ST_A_M45PE16:
10786 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010787 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010788 break;
10789 case FLASH_5761VENDOR_ATMEL_ADB081D:
10790 case FLASH_5761VENDOR_ATMEL_MDB081D:
10791 case FLASH_5761VENDOR_ST_A_M45PE80:
10792 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010793 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010794 break;
10795 case FLASH_5761VENDOR_ATMEL_ADB041D:
10796 case FLASH_5761VENDOR_ATMEL_MDB041D:
10797 case FLASH_5761VENDOR_ST_A_M45PE40:
10798 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010799 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010800 break;
10801 case FLASH_5761VENDOR_ATMEL_ADB021D:
10802 case FLASH_5761VENDOR_ATMEL_MDB021D:
10803 case FLASH_5761VENDOR_ST_A_M45PE20:
10804 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010805 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010806 break;
10807 }
10808 }
10809}
10810
Michael Chanb5d37722006-09-27 16:06:21 -070010811static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10812{
10813 tp->nvram_jedecnum = JEDEC_ATMEL;
10814 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10815 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10816}
10817
Linus Torvalds1da177e2005-04-16 15:20:36 -070010818/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10819static void __devinit tg3_nvram_init(struct tg3 *tp)
10820{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010821 tw32_f(GRC_EEPROM_ADDR,
10822 (EEPROM_ADDR_FSM_RESET |
10823 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10824 EEPROM_ADDR_CLKPERD_SHIFT)));
10825
Michael Chan9d57f012006-12-07 00:23:25 -080010826 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010827
10828 /* Enable seeprom accesses. */
10829 tw32_f(GRC_LOCAL_CTRL,
10830 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10831 udelay(100);
10832
10833 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10834 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10835 tp->tg3_flags |= TG3_FLAG_NVRAM;
10836
Michael Chanec41c7d2006-01-17 02:40:55 -080010837 if (tg3_nvram_lock(tp)) {
 10838			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10839 "tg3_nvram_init failed.\n", tp->dev->name);
10840 return;
10841 }
Michael Chane6af3012005-04-21 17:12:05 -070010842 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010843
Matt Carlson989a9d22007-05-05 11:51:05 -070010844 tp->nvram_size = 0;
10845
Michael Chan361b4ac2005-04-21 17:11:21 -070010846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10847 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010848 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10849 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010850 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010853 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010854 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10855 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010856 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10857 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010858 else
10859 tg3_get_nvram_info(tp);
10860
Matt Carlson989a9d22007-05-05 11:51:05 -070010861 if (tp->nvram_size == 0)
10862 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010863
Michael Chane6af3012005-04-21 17:12:05 -070010864 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010865 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010866
10867 } else {
10868 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10869
10870 tg3_get_eeprom_size(tp);
10871 }
10872}
10873
10874static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10875 u32 offset, u32 *val)
10876{
10877 u32 tmp;
10878 int i;
10879
10880 if (offset > EEPROM_ADDR_ADDR_MASK ||
10881 (offset % 4) != 0)
10882 return -EINVAL;
10883
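	/* Drive the serial-EEPROM state machine directly: latch the word
	 * address together with READ and START, poll EEPROM_ADDR_COMPLETE
	 * in 1 msec steps (up to ~1 second), then collect the result from
	 * GRC_EEPROM_DATA.
	 */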
10884 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10885 EEPROM_ADDR_DEVID_MASK |
10886 EEPROM_ADDR_READ);
10887 tw32(GRC_EEPROM_ADDR,
10888 tmp |
10889 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10890 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10891 EEPROM_ADDR_ADDR_MASK) |
10892 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10893
Michael Chan9d57f012006-12-07 00:23:25 -080010894 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010895 tmp = tr32(GRC_EEPROM_ADDR);
10896
10897 if (tmp & EEPROM_ADDR_COMPLETE)
10898 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010899 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010900 }
10901 if (!(tmp & EEPROM_ADDR_COMPLETE))
10902 return -EBUSY;
10903
10904 *val = tr32(GRC_EEPROM_DATA);
10905 return 0;
10906}
10907
10908#define NVRAM_CMD_TIMEOUT 10000
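/* The completion loop below polls in 10 usec steps, so a single NVRAM
 * command is bounded at roughly 100 msec.
 */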
10909
10910static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10911{
10912 int i;
10913
10914 tw32(NVRAM_CMD, nvram_cmd);
10915 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10916 udelay(10);
10917 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10918 udelay(10);
10919 break;
10920 }
10921 }
10922 if (i == NVRAM_CMD_TIMEOUT) {
10923 return -EBUSY;
10924 }
10925 return 0;
10926}
10927
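/* Buffered Atmel AT45DB flashes use a page-indexed address format
 * (264-byte pages, with the page number shifted up to
 * ATMEL_AT45DB0X1B_PAGE_POS).  These two helpers translate a linear
 * NVRAM offset into that physical form and back again.
 */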
Michael Chan18201802006-03-20 22:29:15 -080010928static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10929{
10930 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10931 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10932 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010933 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010934 (tp->nvram_jedecnum == JEDEC_ATMEL))
10935
10936 addr = ((addr / tp->nvram_pagesize) <<
10937 ATMEL_AT45DB0X1B_PAGE_POS) +
10938 (addr % tp->nvram_pagesize);
10939
10940 return addr;
10941}
10942
Michael Chanc4e65752006-03-20 22:29:32 -080010943static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10944{
10945 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10946 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10947 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010948 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010949 (tp->nvram_jedecnum == JEDEC_ATMEL))
10950
10951 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10952 tp->nvram_pagesize) +
10953 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10954
10955 return addr;
10956}
10957
Linus Torvalds1da177e2005-04-16 15:20:36 -070010958static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10959{
10960 int ret;
10961
Linus Torvalds1da177e2005-04-16 15:20:36 -070010962 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10963 return tg3_nvram_read_using_eeprom(tp, offset, val);
10964
Michael Chan18201802006-03-20 22:29:15 -080010965 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010966
10967 if (offset > NVRAM_ADDR_MSK)
10968 return -EINVAL;
10969
Michael Chanec41c7d2006-01-17 02:40:55 -080010970 ret = tg3_nvram_lock(tp);
10971 if (ret)
10972 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010973
Michael Chane6af3012005-04-21 17:12:05 -070010974 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010975
10976 tw32(NVRAM_ADDR, offset);
10977 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10978 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10979
10980 if (ret == 0)
10981 *val = swab32(tr32(NVRAM_RDDATA));
10982
Michael Chane6af3012005-04-21 17:12:05 -070010983 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010984
Michael Chan381291b2005-12-13 21:08:21 -080010985 tg3_nvram_unlock(tp);
10986
Linus Torvalds1da177e2005-04-16 15:20:36 -070010987 return ret;
10988}
10989
Al Virob9fc7dc2007-12-17 22:59:57 -080010990static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10991{
10992 u32 v;
10993 int res = tg3_nvram_read(tp, offset, &v);
10994 if (!res)
10995 *val = cpu_to_le32(v);
10996 return res;
10997}
10998
Michael Chan18201802006-03-20 22:29:15 -080010999static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11000{
11001 int err;
11002 u32 tmp;
11003
11004 err = tg3_nvram_read(tp, offset, &tmp);
11005 *val = swab32(tmp);
11006 return err;
11007}
11008
Linus Torvalds1da177e2005-04-16 15:20:36 -070011009static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11010 u32 offset, u32 len, u8 *buf)
11011{
11012 int i, j, rc = 0;
11013 u32 val;
11014
11015 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011016 u32 addr;
11017 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011018
11019 addr = offset + i;
11020
11021 memcpy(&data, buf + i, 4);
11022
Al Virob9fc7dc2007-12-17 22:59:57 -080011023 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011024
11025 val = tr32(GRC_EEPROM_ADDR);
11026 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11027
11028 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11029 EEPROM_ADDR_READ);
11030 tw32(GRC_EEPROM_ADDR, val |
11031 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11032 (addr & EEPROM_ADDR_ADDR_MASK) |
11033 EEPROM_ADDR_START |
11034 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011035
Michael Chan9d57f012006-12-07 00:23:25 -080011036 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011037 val = tr32(GRC_EEPROM_ADDR);
11038
11039 if (val & EEPROM_ADDR_COMPLETE)
11040 break;
Michael Chan9d57f012006-12-07 00:23:25 -080011041 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011042 }
11043 if (!(val & EEPROM_ADDR_COMPLETE)) {
11044 rc = -EBUSY;
11045 break;
11046 }
11047 }
11048
11049 return rc;
11050}
11051
11052/* offset and length are dword aligned */
11053static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11054 u8 *buf)
11055{
11056 int ret = 0;
11057 u32 pagesize = tp->nvram_pagesize;
11058 u32 pagemask = pagesize - 1;
11059 u32 nvram_cmd;
11060 u8 *tmp;
11061
11062 tmp = kmalloc(pagesize, GFP_KERNEL);
11063 if (tmp == NULL)
11064 return -ENOMEM;
11065
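	/* Unbuffered flash is programmed one full page at a time: read the
	 * page covering the target range into tmp, merge in the caller's
	 * data, issue a write-enable, erase the page, write-enable again,
	 * then rewrite the page word by word with FIRST/LAST framing.
	 */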
11066 while (len) {
11067 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011068 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011069
11070 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011071
Linus Torvalds1da177e2005-04-16 15:20:36 -070011072 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011073 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011074 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011075 break;
11076 }
11077 if (ret)
11078 break;
11079
11080 page_off = offset & pagemask;
11081 size = pagesize;
11082 if (len < size)
11083 size = len;
11084
11085 len -= size;
11086
11087 memcpy(tmp + page_off, buf, size);
11088
11089 offset = offset + (pagesize - page_off);
11090
Michael Chane6af3012005-04-21 17:12:05 -070011091 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011092
11093 /*
11094 * Before we can erase the flash page, we need
11095 * to issue a special "write enable" command.
11096 */
11097 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11098
11099 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11100 break;
11101
11102 /* Erase the target page */
11103 tw32(NVRAM_ADDR, phy_addr);
11104
11105 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11106 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11107
11108 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11109 break;
11110
11111 /* Issue another write enable to start the write. */
11112 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11113
11114 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11115 break;
11116
11117 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011118 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011119
Al Virob9fc7dc2007-12-17 22:59:57 -080011120 data = *((__be32 *) (tmp + j));
11121 /* swab32(le32_to_cpu(data)), actually */
11122 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011123
11124 tw32(NVRAM_ADDR, phy_addr + j);
11125
11126 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11127 NVRAM_CMD_WR;
11128
11129 if (j == 0)
11130 nvram_cmd |= NVRAM_CMD_FIRST;
11131 else if (j == (pagesize - 4))
11132 nvram_cmd |= NVRAM_CMD_LAST;
11133
11134 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11135 break;
11136 }
11137 if (ret)
11138 break;
11139 }
11140
11141 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11142 tg3_nvram_exec_cmd(tp, nvram_cmd);
11143
11144 kfree(tmp);
11145
11146 return ret;
11147}
11148
11149/* offset and length are dword aligned */
11150static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11151 u8 *buf)
11152{
11153 int i, ret = 0;
11154
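	/* Buffered parts take word-at-a-time programming: FIRST is set on
	 * the first word of the transfer or of a flash page, LAST on the
	 * final word of a page or of the buffer, and ST flash (on chip
	 * families not excluded below) gets an explicit write-enable ahead
	 * of each FIRST word.
	 */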
11155 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011156 u32 page_off, phy_addr, nvram_cmd;
11157 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011158
11159 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011160 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011161
11162 page_off = offset % tp->nvram_pagesize;
11163
Michael Chan18201802006-03-20 22:29:15 -080011164 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011165
11166 tw32(NVRAM_ADDR, phy_addr);
11167
11168 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11169
11170 if ((page_off == 0) || (i == 0))
11171 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011172 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011173 nvram_cmd |= NVRAM_CMD_LAST;
11174
11175 if (i == (len - 4))
11176 nvram_cmd |= NVRAM_CMD_LAST;
11177
Michael Chan4c987482005-09-05 17:52:38 -070011178 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011179 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011180 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011181 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011182 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011183 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011184 (tp->nvram_jedecnum == JEDEC_ST) &&
11185 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011186
11187 if ((ret = tg3_nvram_exec_cmd(tp,
11188 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11189 NVRAM_CMD_DONE)))
11190
11191 break;
11192 }
11193 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11194 /* We always do complete word writes to eeprom. */
11195 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11196 }
11197
11198 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11199 break;
11200 }
11201 return ret;
11202}
11203
11204/* offset and length are dword aligned */
11205static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11206{
11207 int ret;
11208
Linus Torvalds1da177e2005-04-16 15:20:36 -070011209 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011210 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11211 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011212 udelay(40);
11213 }
11214
11215 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11216 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11217 }
11218 else {
11219 u32 grc_mode;
11220
Michael Chanec41c7d2006-01-17 02:40:55 -080011221 ret = tg3_nvram_lock(tp);
11222 if (ret)
11223 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011224
Michael Chane6af3012005-04-21 17:12:05 -070011225 tg3_enable_nvram_access(tp);
11226 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11227 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011228 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011229
11230 grc_mode = tr32(GRC_MODE);
11231 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11232
11233 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11234 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11235
11236 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11237 buf);
11238 }
11239 else {
11240 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11241 buf);
11242 }
11243
11244 grc_mode = tr32(GRC_MODE);
11245 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11246
Michael Chane6af3012005-04-21 17:12:05 -070011247 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011248 tg3_nvram_unlock(tp);
11249 }
11250
11251 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011252 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011253 udelay(40);
11254 }
11255
11256 return ret;
11257}
11258
11259struct subsys_tbl_ent {
11260 u16 subsys_vendor, subsys_devid;
11261 u32 phy_id;
11262};
11263
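/* Known boards are matched by PCI subsystem vendor/device ID;
 * lookup_by_subsys() below uses this table to determine which PHY a
 * given board ships with.
 */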
11264static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11265 /* Broadcom boards. */
11266 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11267 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11268 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11269 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11270 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11271 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11272 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11273 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11274 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11275 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11276 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11277
11278 /* 3com boards. */
11279 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11280 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11281 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11282 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11283 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11284
11285 /* DELL boards. */
11286 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11287 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11288 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11289 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11290
11291 /* Compaq boards. */
11292 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11293 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11294 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11295 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11296 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11297
11298 /* IBM boards. */
11299 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11300};
11301
11302static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11303{
11304 int i;
11305
11306 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11307 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11308 tp->pdev->subsystem_vendor) &&
11309 (subsys_id_to_phy_id[i].subsys_devid ==
11310 tp->pdev->subsystem_device))
11311 return &subsys_id_to_phy_id[i];
11312 }
11313 return NULL;
11314}
11315
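/* Pull the bootcode-provided hardware configuration (PHY ID, LED mode,
 * WOL/ASF/APE capabilities, serdes vs. copper) out of the NIC SRAM
 * config area, or out of the VCPU shadow register on the 5906.
 */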
Michael Chan7d0c41e2005-04-21 17:06:20 -070011316static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011317{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011318 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011319 u16 pmcsr;
11320
11321 /* On some early chips the SRAM cannot be accessed in D3hot state,
11322 	 * so we need to make sure we're in D0.
11323 */
11324 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11325 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11326 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11327 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011328
11329 /* Make sure register accesses (indirect or otherwise)
11330 * will function correctly.
11331 */
11332 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11333 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011334
David S. Millerf49639e2006-06-09 11:58:36 -070011335 /* The memory arbiter has to be enabled in order for SRAM accesses
11336 * to succeed. Normally on powerup the tg3 chip firmware will make
11337 * sure it is enabled, but other entities such as system netboot
11338 * code might disable it.
11339 */
11340 val = tr32(MEMARB_MODE);
11341 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11342
Linus Torvalds1da177e2005-04-16 15:20:36 -070011343 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011344 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11345
Gary Zambranoa85feb82007-05-05 11:52:19 -070011346 /* Assume an onboard device and WOL capable by default. */
11347 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011348
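	/* The 5906 keeps the relevant bits in the VCPU shadow register
	 * instead of the NIC SRAM config area parsed below.
	 */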
Michael Chanb5d37722006-09-27 16:06:21 -070011349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011350 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011351 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011352 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11353 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011354 val = tr32(VCPU_CFGSHDW);
11355 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011356 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011357 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011358 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11359 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011360 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011361 goto done;
Michael Chanb5d37722006-09-27 16:06:21 -070011362 }
11363
Linus Torvalds1da177e2005-04-16 15:20:36 -070011364 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11365 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11366 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011367 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011368 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011369
11370 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11371 tp->nic_sram_data_cfg = nic_cfg;
11372
11373 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11374 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11375 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11376 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11377 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11378 (ver > 0) && (ver < 0x100))
11379 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11380
Matt Carlsona9daf362008-05-25 23:49:44 -070011381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11382 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11383
Linus Torvalds1da177e2005-04-16 15:20:36 -070011384 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11385 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11386 eeprom_phy_serdes = 1;
11387
11388 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11389 if (nic_phy_id != 0) {
11390 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11391 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11392
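			/* Repack the two SRAM PHY ID words into the
			 * driver's internal phy_id layout so they can be
			 * compared against the PHY_ID_* constants.
			 */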
11393 eeprom_phy_id = (id1 >> 16) << 10;
11394 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11395 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11396 } else
11397 eeprom_phy_id = 0;
11398
Michael Chan7d0c41e2005-04-21 17:06:20 -070011399 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011400 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011401 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011402 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11403 else
11404 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11405 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011406
John W. Linvillecbf46852005-04-21 17:01:29 -070011407 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011408 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11409 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011410 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011411 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11412
11413 switch (led_cfg) {
11414 default:
11415 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11416 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11417 break;
11418
11419 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11420 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11421 break;
11422
11423 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11424 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011425
11426 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11427 			 * read from some older 5700/5701 bootcode.
11428 */
11429 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11430 ASIC_REV_5700 ||
11431 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11432 ASIC_REV_5701)
11433 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11434
Linus Torvalds1da177e2005-04-16 15:20:36 -070011435 break;
11436
11437 case SHASTA_EXT_LED_SHARED:
11438 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11439 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11440 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11441 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11442 LED_CTRL_MODE_PHY_2);
11443 break;
11444
11445 case SHASTA_EXT_LED_MAC:
11446 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11447 break;
11448
11449 case SHASTA_EXT_LED_COMBO:
11450 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11451 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11452 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11453 LED_CTRL_MODE_PHY_2);
11454 break;
11455
Stephen Hemminger855e1112008-04-16 16:37:28 -070011456 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011457
11458 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11459 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11460 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11461 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11462
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011463 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11464 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011465
Michael Chan9d26e212006-12-07 00:21:14 -080011466 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011467 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011468 if ((tp->pdev->subsystem_vendor ==
11469 PCI_VENDOR_ID_ARIMA) &&
11470 (tp->pdev->subsystem_device == 0x205a ||
11471 tp->pdev->subsystem_device == 0x2063))
11472 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11473 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011474 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011475 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11476 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011477
11478 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11479 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011480 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011481 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11482 }
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011483
11484 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11485 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Matt Carlson0d3031d2007-10-10 18:02:43 -070011486 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Matt Carlsonb2b98d42008-11-03 16:52:32 -080011487
Gary Zambranoa85feb82007-05-05 11:52:19 -070011488 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11489 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11490 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011491
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011492 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011493 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
Matt Carlson0527ba32007-10-10 18:03:30 -070011494 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11495
Linus Torvalds1da177e2005-04-16 15:20:36 -070011496 if (cfg2 & (1 << 17))
11497 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11498
11499 		/* Serdes signal pre-emphasis in register 0x590 is set by */
11500 		/* the bootcode if bit 18 is set. */
11501 if (cfg2 & (1 << 18))
11502 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011503
11504 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11505 u32 cfg3;
11506
11507 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11508 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11509 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11510 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011511
11512 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11513 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11514 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11515 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11516 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11517 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011518 }
Matt Carlson05ac4cb2008-11-03 16:53:46 -080011519done:
11520 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11521 device_set_wakeup_enable(&tp->pdev->dev,
11522 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011523}
11524
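/* Kick off a single OTP controller command and poll OTP_STATUS until it
 * completes or the 1 ms timeout expires.
 */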
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011525static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11526{
11527 int i;
11528 u32 val;
11529
11530 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11531 tw32(OTP_CTRL, cmd);
11532
11533 /* Wait for up to 1 ms for command to execute. */
11534 for (i = 0; i < 100; i++) {
11535 val = tr32(OTP_STATUS);
11536 if (val & OTP_STATUS_CMD_DONE)
11537 break;
11538 udelay(10);
11539 }
11540
11541 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11542}
11543
11544/* Read the gphy configuration from the OTP region of the chip. The gphy
11545 * configuration is a 32-bit value that straddles the alignment boundary.
11546 * We do two 32-bit reads and then shift and merge the results.
11547 */
11548static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11549{
11550 u32 bhalf_otp, thalf_otp;
11551
11552 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11553
11554 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11555 return 0;
11556
11557 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11558
11559 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11560 return 0;
11561
11562 thalf_otp = tr32(OTP_READ_DATA);
11563
11564 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11565
11566 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11567 return 0;
11568
11569 bhalf_otp = tr32(OTP_READ_DATA);
11570
11571 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11572}
11573
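/* Identify the PHY, via phylib, the MII ID registers, the NVRAM
 * configuration or the subsystem ID table, and set up the default
 * link advertisement.
 */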
Michael Chan7d0c41e2005-04-21 17:06:20 -070011574static int __devinit tg3_phy_probe(struct tg3 *tp)
11575{
11576 u32 hw_phy_id_1, hw_phy_id_2;
11577 u32 hw_phy_id, hw_phy_id_masked;
11578 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011579
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011580 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11581 return tg3_phy_init(tp);
11582
Linus Torvalds1da177e2005-04-16 15:20:36 -070011583 /* Reading the PHY ID register can conflict with ASF
11584 	 * firmware access to the PHY hardware.
11585 */
11586 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011587 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11588 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011589 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11590 } else {
11591 /* Now read the physical PHY_ID from the chip and verify
11592 * that it is sane. If it doesn't look good, we fall back
11593 		 * to either the hard-coded table based PHY_ID or, failing
11594 		 * that, the value found in the eeprom area.
11595 */
11596 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11597 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11598
11599 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11600 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11601 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11602
11603 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11604 }
11605
11606 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11607 tp->phy_id = hw_phy_id;
11608 if (hw_phy_id_masked == PHY_ID_BCM8002)
11609 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011610 else
11611 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011612 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011613 if (tp->phy_id != PHY_ID_INVALID) {
11614 /* Do nothing, phy ID already set up in
11615 * tg3_get_eeprom_hw_cfg().
11616 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011617 } else {
11618 struct subsys_tbl_ent *p;
11619
11620 /* No eeprom signature? Try the hardcoded
11621 * subsys device table.
11622 */
11623 p = lookup_by_subsys(tp);
11624 if (!p)
11625 return -ENODEV;
11626
11627 tp->phy_id = p->phy_id;
11628 if (!tp->phy_id ||
11629 tp->phy_id == PHY_ID_BCM8002)
11630 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11631 }
11632 }
11633
Michael Chan747e8f82005-07-25 12:33:22 -070011634 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011635 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011636 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011637 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011638
11639 tg3_readphy(tp, MII_BMSR, &bmsr);
11640 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11641 (bmsr & BMSR_LSTATUS))
11642 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011643
Linus Torvalds1da177e2005-04-16 15:20:36 -070011644 err = tg3_phy_reset(tp);
11645 if (err)
11646 return err;
11647
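		/* Build the full 10/100(/1000) advertisement and, if the
		 * PHY is not already advertising all of it, rewrite the
		 * advertisement registers and restart autonegotiation.
		 */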
11648 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11649 ADVERTISE_100HALF | ADVERTISE_100FULL |
11650 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11651 tg3_ctrl = 0;
11652 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11653 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11654 MII_TG3_CTRL_ADV_1000_FULL);
11655 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11656 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11657 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11658 MII_TG3_CTRL_ENABLE_AS_MASTER);
11659 }
11660
Michael Chan3600d912006-12-07 00:21:48 -080011661 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11662 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11663 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11664 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011665 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11666
11667 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11668 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11669
11670 tg3_writephy(tp, MII_BMCR,
11671 BMCR_ANENABLE | BMCR_ANRESTART);
11672 }
11673 tg3_phy_set_wirespeed(tp);
11674
11675 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11676 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11677 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11678 }
11679
11680skip_phy_reset:
11681 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11682 err = tg3_init_5401phy_dsp(tp);
11683 if (err)
11684 return err;
11685 }
11686
11687 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11688 err = tg3_init_5401phy_dsp(tp);
11689 }
11690
Michael Chan747e8f82005-07-25 12:33:22 -070011691 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011692 tp->link_config.advertising =
11693 (ADVERTISED_1000baseT_Half |
11694 ADVERTISED_1000baseT_Full |
11695 ADVERTISED_Autoneg |
11696 ADVERTISED_FIBRE);
11697 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11698 tp->link_config.advertising &=
11699 ~(ADVERTISED_1000baseT_Half |
11700 ADVERTISED_1000baseT_Full);
11701
11702 return err;
11703}
11704
11705static void __devinit tg3_read_partno(struct tg3 *tp)
11706{
11707 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011708 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011709 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011710
Michael Chan18201802006-03-20 22:29:15 -080011711 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011712 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011713
Michael Chan18201802006-03-20 22:29:15 -080011714 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011715 for (i = 0; i < 256; i += 4) {
11716 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011717
Michael Chan1b277772006-03-20 22:27:48 -080011718 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11719 goto out_not_found;
11720
11721 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11722 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11723 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11724 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11725 }
11726 } else {
11727 int vpd_cap;
11728
11729 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11730 for (i = 0; i < 256; i += 4) {
11731 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011732 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011733 u16 tmp16;
11734
11735 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11736 i);
11737 while (j++ < 100) {
11738 pci_read_config_word(tp->pdev, vpd_cap +
11739 PCI_VPD_ADDR, &tmp16);
11740 if (tmp16 & 0x8000)
11741 break;
11742 msleep(1);
11743 }
David S. Millerf49639e2006-06-09 11:58:36 -070011744 if (!(tmp16 & 0x8000))
11745 goto out_not_found;
11746
Michael Chan1b277772006-03-20 22:27:48 -080011747 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11748 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011749 v = cpu_to_le32(tmp);
11750 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011751 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011752 }
11753
11754 /* Now parse and find the part number. */
Michael Chanaf2c6a42006-11-07 14:57:51 -080011755 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011756 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011757 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011758
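		/* 0x82 is the VPD identifier-string tag and 0x91 the
		 * read-write VPD tag; skip over both.  0x90 marks the
		 * read-only VPD area that carries the "PN" keyword.
		 */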
11759 if (val == 0x82 || val == 0x91) {
11760 i = (i + 3 +
11761 (vpd_data[i + 1] +
11762 (vpd_data[i + 2] << 8)));
11763 continue;
11764 }
11765
11766 if (val != 0x90)
11767 goto out_not_found;
11768
11769 block_end = (i + 3 +
11770 (vpd_data[i + 1] +
11771 (vpd_data[i + 2] << 8)));
11772 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011773
11774 if (block_end > 256)
11775 goto out_not_found;
11776
11777 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011778 if (vpd_data[i + 0] == 'P' &&
11779 vpd_data[i + 1] == 'N') {
11780 int partno_len = vpd_data[i + 2];
11781
Michael Chanaf2c6a42006-11-07 14:57:51 -080011782 i += 3;
11783 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011784 goto out_not_found;
11785
11786 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011787 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011788
11789 /* Success. */
11790 return;
11791 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011792 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011793 }
11794
11795 /* Part number not found. */
11796 goto out_not_found;
11797 }
11798
11799out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11801 strcpy(tp->board_part_number, "BCM95906");
11802 else
11803 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011804}
11805
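/* Sanity check the header of a firmware image in NVRAM before trusting
 * the version information that follows it.
 */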
Matt Carlson9c8a6202007-10-21 16:16:08 -070011806static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11807{
11808 u32 val;
11809
11810 if (tg3_nvram_read_swab(tp, offset, &val) ||
11811 (val & 0xfc000000) != 0x0c000000 ||
11812 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11813 val != 0)
11814 return 0;
11815
11816 return 1;
11817}
11818
Michael Chanc4e65752006-03-20 22:29:32 -080011819static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11820{
11821 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011822 u32 ver_offset;
11823 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011824
11825 if (tg3_nvram_read_swab(tp, 0, &val))
11826 return;
11827
11828 if (val != TG3_EEPROM_MAGIC)
11829 return;
11830
11831 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11832 tg3_nvram_read_swab(tp, 0x4, &start))
11833 return;
11834
11835 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011836
11837 if (!tg3_fw_img_is_valid(tp, offset) ||
11838 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011839 return;
11840
Matt Carlson9c8a6202007-10-21 16:16:08 -070011841 offset = offset + ver_offset - start;
11842 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011843 __le32 v;
11844 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011845 return;
11846
Al Virob9fc7dc2007-12-17 22:59:57 -080011847 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011848 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011849
11850 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011851 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011852 return;
11853
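	/* ASF is enabled (and not handled by the APE): walk the NVM
	 * directory for the ASF init image and append its version too.
	 */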
11854 for (offset = TG3_NVM_DIR_START;
11855 offset < TG3_NVM_DIR_END;
11856 offset += TG3_NVM_DIRENT_SIZE) {
11857 if (tg3_nvram_read_swab(tp, offset, &val))
11858 return;
11859
11860 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11861 break;
11862 }
11863
11864 if (offset == TG3_NVM_DIR_END)
11865 return;
11866
11867 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11868 start = 0x08000000;
11869 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11870 return;
11871
11872 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11873 !tg3_fw_img_is_valid(tp, offset) ||
11874 tg3_nvram_read_swab(tp, offset + 8, &val))
11875 return;
11876
11877 offset += val - start;
11878
11879 bcnt = strlen(tp->fw_ver);
11880
11881 tp->fw_ver[bcnt++] = ',';
11882 tp->fw_ver[bcnt++] = ' ';
11883
11884 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011885 __le32 v;
11886 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011887 return;
11888
Al Virob9fc7dc2007-12-17 22:59:57 -080011889 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011890
Al Virob9fc7dc2007-12-17 22:59:57 -080011891 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11892 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011893 break;
11894 }
11895
Al Virob9fc7dc2007-12-17 22:59:57 -080011896 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11897 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011898 }
11899
11900 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011901}
11902
Michael Chan7544b092007-05-05 13:08:32 -070011903static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11904
Linus Torvalds1da177e2005-04-16 15:20:36 -070011905static int __devinit tg3_get_invariants(struct tg3 *tp)
11906{
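	/* Host bridges known to reorder posted writes; see the
	 * TG3_FLAG_MBOX_WRITE_REORDER handling further down.
	 */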
11907 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011908 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11909 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011910 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11911 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011912 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11913 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011914 { },
11915 };
11916 u32 misc_ctrl_reg;
11917 u32 cacheline_sz_reg;
11918 u32 pci_state_reg, grc_misc_cfg;
11919 u32 val;
11920 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011921 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011922
Linus Torvalds1da177e2005-04-16 15:20:36 -070011923 /* Force memory write invalidate off. If we leave it on,
11924 * then on 5700_BX chips we have to enable a workaround.
11925 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11926 	 * to match the cacheline size.  The Broadcom driver has this
11927 	 * workaround but turns MWI off all the time, so it never uses
11928 * it. This seems to suggest that the workaround is insufficient.
11929 */
11930 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11931 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11932 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11933
11934 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11935 * has the register indirect write enable bit set before
11936 * we try to access any of the MMIO registers. It is also
11937 * critical that the PCI-X hw workaround situation is decided
11938 * before that as well.
11939 */
11940 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11941 &misc_ctrl_reg);
11942
11943 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11944 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11946 u32 prod_id_asic_rev;
11947
11948 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11949 &prod_id_asic_rev);
11950 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11951 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011952
Michael Chanff645be2005-04-21 17:09:53 -070011953 /* Wrong chip ID in 5752 A0. This code can be removed later
11954 * as A0 is not in production.
11955 */
11956 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11957 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11958
Michael Chan68929142005-08-09 20:17:14 -070011959 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11960 * we need to disable memory and use config. cycles
11961 * only to access all registers. The 5702/03 chips
11962 * can mistakenly decode the special cycles from the
11963 * ICH chipsets as memory write cycles, causing corruption
11964 * of register and memory space. Only certain ICH bridges
11965 * will drive special cycles with non-zero data during the
11966 * address phase which can fall within the 5703's address
11967 * range. This is not an ICH bug as the PCI spec allows
11968 * non-zero address during special cycles. However, only
11969 * these ICH bridges are known to drive non-zero addresses
11970 * during special cycles.
11971 *
11972 * Since special cycles do not cross PCI bridges, we only
11973 * enable this workaround if the 5703 is on the secondary
11974 * bus of these ICH bridges.
11975 */
11976 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11977 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11978 static struct tg3_dev_id {
11979 u32 vendor;
11980 u32 device;
11981 u32 rev;
11982 } ich_chipsets[] = {
11983 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11984 PCI_ANY_ID },
11985 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11986 PCI_ANY_ID },
11987 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11988 0xa },
11989 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11990 PCI_ANY_ID },
11991 { },
11992 };
11993 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11994 struct pci_dev *bridge = NULL;
11995
11996 while (pci_id->vendor != 0) {
11997 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11998 bridge);
11999 if (!bridge) {
12000 pci_id++;
12001 continue;
12002 }
12003 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070012004 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070012005 continue;
12006 }
12007 if (bridge->subordinate &&
12008 (bridge->subordinate->number ==
12009 tp->pdev->bus->number)) {
12010
12011 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12012 pci_dev_put(bridge);
12013 break;
12014 }
12015 }
12016 }
12017
Matt Carlson41588ba2008-04-19 18:12:33 -070012018 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12019 static struct tg3_dev_id {
12020 u32 vendor;
12021 u32 device;
12022 } bridge_chipsets[] = {
12023 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12024 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12025 { },
12026 };
12027 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12028 struct pci_dev *bridge = NULL;
12029
12030 while (pci_id->vendor != 0) {
12031 bridge = pci_get_device(pci_id->vendor,
12032 pci_id->device,
12033 bridge);
12034 if (!bridge) {
12035 pci_id++;
12036 continue;
12037 }
12038 if (bridge->subordinate &&
12039 (bridge->subordinate->number <=
12040 tp->pdev->bus->number) &&
12041 (bridge->subordinate->subordinate >=
12042 tp->pdev->bus->number)) {
12043 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12044 pci_dev_put(bridge);
12045 break;
12046 }
12047 }
12048 }
12049
Michael Chan4a29cc22006-03-19 13:21:12 -080012050 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12051 * DMA addresses > 40-bit. This bridge may have other additional
12052 * 57xx devices behind it in some 4-port NIC designs for example.
12053 * Any tg3 device found behind the bridge will also need the 40-bit
12054 * DMA workaround.
12055 */
Michael Chana4e2b342005-10-26 15:46:52 -070012056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12058 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080012059 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070012060 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070012061 }
Michael Chan4a29cc22006-03-19 13:21:12 -080012062 else {
12063 struct pci_dev *bridge = NULL;
12064
12065 do {
12066 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12067 PCI_DEVICE_ID_SERVERWORKS_EPB,
12068 bridge);
12069 if (bridge && bridge->subordinate &&
12070 (bridge->subordinate->number <=
12071 tp->pdev->bus->number) &&
12072 (bridge->subordinate->subordinate >=
12073 tp->pdev->bus->number)) {
12074 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12075 pci_dev_put(bridge);
12076 break;
12077 }
12078 } while (bridge);
12079 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012080
Linus Torvalds1da177e2005-04-16 15:20:36 -070012081 /* Initialize misc host control in PCI block. */
12082 tp->misc_host_ctrl |= (misc_ctrl_reg &
12083 MISC_HOST_CTRL_CHIPREV);
12084 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12085 tp->misc_host_ctrl);
12086
12087 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12088 &cacheline_sz_reg);
12089
12090 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12091 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12092 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12093 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12094
Michael Chan7544b092007-05-05 13:08:32 -070012095 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12096 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12097 tp->pdev_peer = tg3_find_peer(tp);
12098
John W. Linville2052da92005-04-21 16:56:08 -070012099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012103 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012107 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012108 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12109
John W. Linville1b440c562005-04-21 17:03:18 -070012110 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12111 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12112 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12113
Michael Chan5a6f3072006-03-20 22:28:05 -080012114 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012115 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12116 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12117 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12118 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12119 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12120 tp->pdev_peer == tp->pdev))
12121 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12122
Michael Chanaf36e6b2006-03-23 01:28:06 -080012123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012125 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012127 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012129 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012130 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012131 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012132 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012133 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12134 ASIC_REV_5750 &&
12135 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012136 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012137 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012139
Matt Carlsonf51f3562008-05-25 23:45:08 -070012140 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12141 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012142 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12143
Michael Chanc7835a72006-11-15 21:14:42 -080012144 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12145 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012146 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012147
12148 pcie_set_readrq(tp->pdev, 4096);
12149
Michael Chanc7835a72006-11-15 21:14:42 -080012150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12151 u16 lnkctl;
12152
12153 pci_read_config_word(tp->pdev,
12154 pcie_cap + PCI_EXP_LNKCTL,
12155 &lnkctl);
12156 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12157 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12158 }
12159 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012160
Michael Chan399de502005-10-03 14:02:39 -070012161 /* If we have an AMD 762 or VIA K8T800 chipset, write
12162 * reordering to the mailbox registers done by the host
12163 * controller can cause major troubles. We read back from
12164 * every mailbox register write to force the writes to be
12165 * posted to the chip in order.
12166 */
12167 if (pci_dev_present(write_reorder_chipsets) &&
12168 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12169 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12170
Linus Torvalds1da177e2005-04-16 15:20:36 -070012171 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12172 tp->pci_lat_timer < 64) {
12173 tp->pci_lat_timer = 64;
12174
12175 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12176 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12177 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12178 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12179
12180 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12181 cacheline_sz_reg);
12182 }
12183
Matt Carlson9974a352007-10-07 23:27:28 -070012184 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12185 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12186 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12187 if (!tp->pcix_cap) {
12188 printk(KERN_ERR PFX "Cannot find PCI-X "
12189 "capability, aborting.\n");
12190 return -EIO;
12191 }
12192 }
12193
Linus Torvalds1da177e2005-04-16 15:20:36 -070012194 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12195 &pci_state_reg);
12196
Matt Carlson9974a352007-10-07 23:27:28 -070012197 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012198 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12199
12200 /* If this is a 5700 BX chipset, and we are in PCI-X
12201 		 * mode, enable the register write workaround.
12202 		 *
12203 		 * The workaround is to use indirect register accesses
12204 		 * for all chip writes except those to mailbox registers.
12205 */
12206 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12207 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012208
12209 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12210
12211 			/* The chip can have its power management PCI config
12212 * space registers clobbered due to this bug.
12213 * So explicitly force the chip into D0 here.
12214 */
Matt Carlson9974a352007-10-07 23:27:28 -070012215 pci_read_config_dword(tp->pdev,
12216 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012217 &pm_reg);
12218 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12219 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012220 pci_write_config_dword(tp->pdev,
12221 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012222 pm_reg);
12223
12224 /* Also, force SERR#/PERR# in PCI command. */
12225 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12226 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12227 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12228 }
12229 }
12230
Michael Chan087fe252005-08-09 20:17:41 -070012231 /* 5700 BX chips need to have their TX producer index mailboxes
12232 	 * written twice to work around a bug.
12233 */
12234 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12235 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12236
Linus Torvalds1da177e2005-04-16 15:20:36 -070012237 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12238 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12239 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12240 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12241
12242 /* Chip-specific fixup from Broadcom driver */
12243 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12244 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12245 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12246 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12247 }
12248
Michael Chan1ee582d2005-08-09 20:16:46 -070012249 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012250 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012251 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012252 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012253 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012254 tp->write32_tx_mbox = tg3_write32;
12255 tp->write32_rx_mbox = tg3_write32;
12256
12257 /* Various workaround register access methods */
12258 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12259 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012260 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12261 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12262 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12263 /*
12264 * Back to back register writes can cause problems on these
12265 * chips, the workaround is to read back all reg writes
12266 * except those to mailbox regs.
12267 *
12268 * See tg3_write_indirect_reg32().
12269 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012270 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012271 }
12272
Michael Chan1ee582d2005-08-09 20:16:46 -070012273
12274 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12275 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12276 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12277 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12278 tp->write32_rx_mbox = tg3_write_flush_reg32;
12279 }
Michael Chan20094932005-08-09 20:16:32 -070012280
Michael Chan68929142005-08-09 20:17:14 -070012281 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12282 tp->read32 = tg3_read_indirect_reg32;
12283 tp->write32 = tg3_write_indirect_reg32;
12284 tp->read32_mbox = tg3_read_indirect_mbox;
12285 tp->write32_mbox = tg3_write_indirect_mbox;
12286 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12287 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12288
12289 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012290 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012291
12292 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12293 pci_cmd &= ~PCI_COMMAND_MEMORY;
12294 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12295 }
Michael Chanb5d37722006-09-27 16:06:21 -070012296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12297 tp->read32_mbox = tg3_read32_mbox_5906;
12298 tp->write32_mbox = tg3_write32_mbox_5906;
12299 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12300 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12301 }
Michael Chan68929142005-08-09 20:17:14 -070012302
Michael Chanbbadf502006-04-06 21:46:34 -070012303 if (tp->write32 == tg3_write_indirect_reg32 ||
12304 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12305 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012306 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012307 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12308
Michael Chan7d0c41e2005-04-21 17:06:20 -070012309 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012310 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012311 * determined before calling tg3_set_power_state() so that
12312 * we know whether or not to switch out of Vaux power.
12313 * When the flag is set, it means that GPIO1 is used for eeprom
12314 * write protect and also implies that it is a LOM where GPIOs
12315 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012316 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012317 tg3_get_eeprom_hw_cfg(tp);
12318
Matt Carlson0d3031d2007-10-10 18:02:43 -070012319 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12320 /* Allow reads and writes to the
12321 * APE register and memory space.
12322 */
12323 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12324 PCISTATE_ALLOW_APE_SHMEM_WR;
12325 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12326 pci_state_reg);
12327 }
12328
Matt Carlson9936bcf2007-10-10 18:03:07 -070012329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlsonbcb37f62008-11-03 16:52:09 -080012331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -070012332 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12333
Michael Chan314fba32005-04-21 17:07:04 -070012334 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12335 * GPIO1 driven high will bring 5700's external PHY out of reset.
12336 * It is also used as eeprom write protect on LOMs.
12337 */
12338 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12339 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12340 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12341 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12342 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012343 /* Unused GPIO3 must be driven as output on 5752 because there
12344 * are no pull-up resistors on unused GPIO pins.
12345 */
12346 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12347 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012348
Michael Chanaf36e6b2006-03-23 01:28:06 -080012349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12350 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12351
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012352 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12353 /* Turn off the debug UART. */
12354 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12355 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12356 /* Keep VMain power. */
12357 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12358 GRC_LCLCTRL_GPIO_OUTPUT0;
12359 }
12360
Linus Torvalds1da177e2005-04-16 15:20:36 -070012361 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012362 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012363 if (err) {
12364 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12365 pci_name(tp->pdev));
12366 return err;
12367 }
12368
12369 /* 5700 B0 chips do not support checksumming correctly due
12370 * to hardware bugs.
12371 */
12372 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12373 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12374
Linus Torvalds1da177e2005-04-16 15:20:36 -070012375 /* Derive initial jumbo mode from MTU assigned in
12376 * ether_setup() via the alloc_etherdev() call
12377 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012378 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012379 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012380 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012381
12382 /* Determine WakeOnLan speed to use. */
12383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12384 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12385 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12386 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12387 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12388 } else {
12389 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12390 }
12391
12392 	/* A few boards don't want the Ethernet@WireSpeed phy feature */
12393 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12394 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12395 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012396 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012397 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012398 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012399 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12400
12401 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12402 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12403 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12404 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12405 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12406
Michael Chanc424cb22006-04-29 18:56:34 -070012407 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012412 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12413 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12414 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012415 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12416 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012417 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12418 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012419 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12420 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012421
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12423 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12424 tp->phy_otp = tg3_read_otp_phycfg(tp);
12425 if (tp->phy_otp == 0)
12426 tp->phy_otp = TG3_OTP_DEFAULT;
12427 }
12428
Matt Carlsonf51f3562008-05-25 23:45:08 -070012429 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012430 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12431 else
12432 tp->mi_mode = MAC_MI_MODE_BASE;
12433
Linus Torvalds1da177e2005-04-16 15:20:36 -070012434 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012435 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12436 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12437 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12438
Matt Carlson57e69832008-05-25 23:48:31 -070012439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12440 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12441
Matt Carlson158d7ab2008-05-29 01:37:54 -070012442 err = tg3_mdio_init(tp);
12443 if (err)
12444 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012445
12446 /* Initialize data/descriptor byte/word swapping. */
12447 val = tr32(GRC_MODE);
12448 val &= GRC_MODE_HOST_STACKUP;
12449 tw32(GRC_MODE, val | tp->grc_mode);
12450
12451 tg3_switch_clocks(tp);
12452
12453 /* Clear this out for sanity. */
12454 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12455
12456 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12457 &pci_state_reg);
12458 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12459 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12460 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12461
12462 if (chiprevid == CHIPREV_ID_5701_A0 ||
12463 chiprevid == CHIPREV_ID_5701_B0 ||
12464 chiprevid == CHIPREV_ID_5701_B2 ||
12465 chiprevid == CHIPREV_ID_5701_B5) {
12466 void __iomem *sram_base;
12467
12468 /* Write some dummy words into the SRAM status block
12469 			 * area and see if it reads back correctly.  If the return
12470 			 * value is bad, force-enable the PCI-X workaround.
12471 */
12472 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12473
12474 writel(0x00000000, sram_base);
12475 writel(0x00000000, sram_base + 4);
12476 writel(0xffffffff, sram_base + 4);
12477 if (readl(sram_base) != 0x00000000)
12478 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12479 }
12480 }
12481
12482 udelay(50);
12483 tg3_nvram_init(tp);
12484
12485 grc_misc_cfg = tr32(GRC_MISC_CFG);
12486 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12487
Linus Torvalds1da177e2005-04-16 15:20:36 -070012488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12489 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12490 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12491 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12492
David S. Millerfac9b832005-05-18 22:46:34 -070012493 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12494 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12495 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12496 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12497 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12498 HOSTCC_MODE_CLRTICK_TXBD);
12499
12500 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12501 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12502 tp->misc_host_ctrl);
12503 }
12504
Matt Carlson3bda1252008-08-15 14:08:22 -070012505 /* Preserve the APE MAC_MODE bits */
12506 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12507 tp->mac_mode = tr32(MAC_MODE) |
12508 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12509 else
12510 tp->mac_mode = TG3_DEF_MAC_MODE;
12511
Linus Torvalds1da177e2005-04-16 15:20:36 -070012512 /* these are limited to 10/100 only */
12513 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12514 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12515 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12516 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12517 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12518 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12519 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12520 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12521 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012522 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12523 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012525 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12526
12527 err = tg3_phy_probe(tp);
12528 if (err) {
12529 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12530 pci_name(tp->pdev), err);
12531 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012532 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012533 }
12534
12535 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012536 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012537
12538 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12539 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12540 } else {
12541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12542 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12543 else
12544 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12545 }
12546
12547 /* 5700 {AX,BX} chips have a broken status block link
12548 * change bit implementation, so we must use the
12549 * status register in those cases.
12550 */
12551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12552 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12553 else
12554 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12555
12556 	/* The led_ctrl is set during tg3_phy_probe; here we might
12557 * have to force the link status polling mechanism based
12558 * upon subsystem IDs.
12559 */
12560 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012561 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012562 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12563 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12564 TG3_FLAG_USE_LINKCHG_REG);
12565 }
12566
12567 /* For all SERDES we poll the MAC status register. */
12568 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12569 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12570 else
12571 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12572
Michael Chan5a6f3072006-03-20 22:28:05 -080012573 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070012574 * straddle the 4GB address boundary in some cases.
12575 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080012576 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012578 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080012582 tp->dev->hard_start_xmit = tg3_start_xmit;
12583 else
12584 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
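	/* Illustrative example with made-up addresses: a 2 KB TX buffer
	 * mapped at DMA address 0x0000_0000_ffff_fc00 ends at
	 * 0x0000_0001_0000_0400 and so straddles the 4 GB boundary.
	 * tg3_start_xmit_dma_bug() is the transmit path that tests each
	 * mapping for such a crossing and works around it before handing
	 * the buffer to the hardware.
	 */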
Linus Torvalds1da177e2005-04-16 15:20:36 -070012585
12586 tp->rx_offset = 2;
12587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12588 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12589 tp->rx_offset = 0;
12590
Michael Chanf92905d2006-06-29 20:14:29 -070012591 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12592
12593 /* Increment the rx prod index on the rx std ring by at most
12594 * 8 for these chips to workaround hw errata.
12595	 * 8 for these chips to work around hw errata.
12596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12599 tp->rx_std_max_post = 8;
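	/* rx_std_max_post caps how far the standard RX ring producer index
	 * may be advanced in a single mailbox write; posting in chunks no
	 * larger than this is presumably enforced by the RX replenish logic
	 * elsewhere in this file (a sketch of the intent, based on the field
	 * name and the errata comment above).
	 */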
12600
Matt Carlson8ed5d972007-05-07 00:25:49 -070012601 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12602 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12603 PCIE_PWR_MGMT_L1_THRESH_MSK;
12604
Linus Torvalds1da177e2005-04-16 15:20:36 -070012605 return err;
12606}
12607
David S. Miller49b6e95f2007-03-29 01:38:42 -070012608#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012609static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12610{
12611 struct net_device *dev = tp->dev;
12612 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012613 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012614 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012615 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012616
David S. Miller49b6e95f2007-03-29 01:38:42 -070012617 addr = of_get_property(dp, "local-mac-address", &len);
12618 if (addr && len == 6) {
12619 memcpy(dev->dev_addr, addr, 6);
12620 memcpy(dev->perm_addr, dev->dev_addr, 6);
12621 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012622 }
12623 return -ENODEV;
12624}
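/* Hypothetical OpenFirmware property consumed above, as it might appear
 * in a SPARC device tree:
 *
 *	local-mac-address = [ 00 03 ba 12 34 56 ];
 *
 * A 6-byte value is copied straight into dev->dev_addr and dev->perm_addr;
 * anything else makes this helper return -ENODEV so that the normal
 * probing in tg3_get_device_address() runs instead.
 */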
12625
12626static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12627{
12628 struct net_device *dev = tp->dev;
12629
12630 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012631 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012632 return 0;
12633}
12634#endif
12635
12636static int __devinit tg3_get_device_address(struct tg3 *tp)
12637{
12638 struct net_device *dev = tp->dev;
12639 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012640 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012641
David S. Miller49b6e95f2007-03-29 01:38:42 -070012642#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012643 if (!tg3_get_macaddr_sparc(tp))
12644 return 0;
12645#endif
12646
12647 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012648 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012649 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012650 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12651 mac_offset = 0xcc;
12652 if (tg3_nvram_lock(tp))
12653 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12654 else
12655 tg3_nvram_unlock(tp);
12656 }
Michael Chanb5d37722006-09-27 16:06:21 -070012657 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12658 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012659
12660 /* First try to get it from MAC address mailbox. */
12661 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12662 if ((hi >> 16) == 0x484b) {
12663 dev->dev_addr[0] = (hi >> 8) & 0xff;
12664 dev->dev_addr[1] = (hi >> 0) & 0xff;
12665
12666 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12667 dev->dev_addr[2] = (lo >> 24) & 0xff;
12668 dev->dev_addr[3] = (lo >> 16) & 0xff;
12669 dev->dev_addr[4] = (lo >> 8) & 0xff;
12670 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012671
Michael Chan008652b2006-03-27 23:14:53 -080012672 /* Some old bootcode may report a 0 MAC address in SRAM */
12673 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12674 }
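	/* Worked example with made-up values: hi = 0x484b0a1b and
	 * lo = 0x2c3d4e5f unpack to the address 0a:1b:2c:3d:4e:5f.
	 * The 0x484b signature in the upper half of 'hi' ("HK" in ASCII)
	 * presumably marks a mailbox that bootcode has populated.
	 */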
12675 if (!addr_ok) {
12676 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012677 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012678 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12679 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12680 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12681 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12682 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12683 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12684 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12685 }
12686 /* Finally just fetch it out of the MAC control regs. */
12687 else {
12688 hi = tr32(MAC_ADDR_0_HIGH);
12689 lo = tr32(MAC_ADDR_0_LOW);
12690
12691 dev->dev_addr[5] = lo & 0xff;
12692 dev->dev_addr[4] = (lo >> 8) & 0xff;
12693 dev->dev_addr[3] = (lo >> 16) & 0xff;
12694 dev->dev_addr[2] = (lo >> 24) & 0xff;
12695 dev->dev_addr[1] = hi & 0xff;
12696 dev->dev_addr[0] = (hi >> 8) & 0xff;
12697 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012698 }
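	/* Note the differing byte order above: the SRAM mailbox words and
	 * the MAC_ADDR_0_HIGH/LOW registers are unpacked most significant
	 * byte first, while the words read from NVRAM are unpacked low
	 * byte first.  The three paths therefore shift the 32-bit values
	 * differently so that dev_addr[] ends up in the same layout.
	 */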
12699
12700 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012701#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012702 if (!tg3_get_default_macaddr_sparc(tp))
12703 return 0;
12704#endif
12705 return -EINVAL;
12706 }
John W. Linville2ff43692005-09-12 14:44:20 -070012707 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012708 return 0;
12709}
12710
David S. Miller59e6b432005-05-18 22:50:10 -070012711#define BOUNDARY_SINGLE_CACHELINE 1
12712#define BOUNDARY_MULTI_CACHELINE 2
12713
12714static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12715{
12716 int cacheline_size;
12717 u8 byte;
12718 int goal;
12719
12720 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12721 if (byte == 0)
12722 cacheline_size = 1024;
12723 else
12724 cacheline_size = (int) byte * 4;
12725
12726 /* On 5703 and later chips, the boundary bits have no
12727 * effect.
12728 */
12729 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12730 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12731 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12732 goto out;
12733
12734#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12735 goal = BOUNDARY_MULTI_CACHELINE;
12736#else
12737#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12738 goal = BOUNDARY_SINGLE_CACHELINE;
12739#else
12740 goal = 0;
12741#endif
12742#endif
12743
12744 if (!goal)
12745 goto out;
12746
12747 /* PCI controllers on most RISC systems tend to disconnect
12748 * when a device tries to burst across a cache-line boundary.
12749 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12750 *
12751 * Unfortunately, for PCI-E there are only limited
12752 * write-side controls for this, and thus for reads
12753 * we will still get the disconnects. We'll also waste
12754 * these PCI cycles for both read and write for chips
12755 * other than 5700 and 5701 which do not implement the
12756 * boundary bits.
12757 */
12758 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12759 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12760 switch (cacheline_size) {
12761 case 16:
12762 case 32:
12763 case 64:
12764 case 128:
12765 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12766 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12767 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12768 } else {
12769 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12770 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12771 }
12772 break;
12773
12774 case 256:
12775 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12776 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12777 break;
12778
12779 default:
12780 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12781 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12782 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012783 }
David S. Miller59e6b432005-05-18 22:50:10 -070012784 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12785 switch (cacheline_size) {
12786 case 16:
12787 case 32:
12788 case 64:
12789 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12790 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12791 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12792 break;
12793 }
12794 /* fallthrough */
12795 case 128:
12796 default:
12797 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12798 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12799 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012800 }
David S. Miller59e6b432005-05-18 22:50:10 -070012801 } else {
12802 switch (cacheline_size) {
12803 case 16:
12804 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12805 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12806 DMA_RWCTRL_WRITE_BNDRY_16);
12807 break;
12808 }
12809 /* fallthrough */
12810 case 32:
12811 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12812 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12813 DMA_RWCTRL_WRITE_BNDRY_32);
12814 break;
12815 }
12816 /* fallthrough */
12817 case 64:
12818 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12819 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12820 DMA_RWCTRL_WRITE_BNDRY_64);
12821 break;
12822 }
12823 /* fallthrough */
12824 case 128:
12825 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12826 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12827 DMA_RWCTRL_WRITE_BNDRY_128);
12828 break;
12829 }
12830 /* fallthrough */
12831 case 256:
12832 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12833 DMA_RWCTRL_WRITE_BNDRY_256);
12834 break;
12835 case 512:
12836 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12837 DMA_RWCTRL_WRITE_BNDRY_512);
12838 break;
12839 case 1024:
12840 default:
12841 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12842 DMA_RWCTRL_WRITE_BNDRY_1024);
12843 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012844 }
David S. Miller59e6b432005-05-18 22:50:10 -070012845 }
12846
12847out:
12848 return val;
12849}
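/* Worked example (hypothetical system): a PCI_CACHE_LINE_SIZE register
 * value of 0x10 gives cacheline_size = 64 bytes.  On a conventional-PCI
 * 5700/5701 with goal == BOUNDARY_SINGLE_CACHELINE this selects
 * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, i.e. DMA bursts
 * are broken at 64-byte boundaries to match the host cache line.
 */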
12850
Linus Torvalds1da177e2005-04-16 15:20:36 -070012851static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12852{
12853 struct tg3_internal_buffer_desc test_desc;
12854 u32 sram_dma_descs;
12855 int i, ret;
12856
12857 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12858
12859 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12860 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12861 tw32(RDMAC_STATUS, 0);
12862 tw32(WDMAC_STATUS, 0);
12863
12864 tw32(BUFMGR_MODE, 0);
12865 tw32(FTQ_RESET, 0);
12866
12867 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12868 test_desc.addr_lo = buf_dma & 0xffffffff;
12869 test_desc.nic_mbuf = 0x00002100;
12870 test_desc.len = size;
12871
12872 /*
12873 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12874	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12875 * initial scan.
12876 *
12877 * Broadcom tells me:
12878 * ...the DMA engine is connected to the GRC block and a DMA
12879 * reset may affect the GRC block in some unpredictable way...
12880 * The behavior of resets to individual blocks has not been tested.
12881 *
12882 * Broadcom noted the GRC reset will also reset all sub-components.
12883 */
12884 if (to_device) {
12885 test_desc.cqid_sqid = (13 << 8) | 2;
12886
12887 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12888 udelay(40);
12889 } else {
12890 test_desc.cqid_sqid = (16 << 8) | 7;
12891
12892 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12893 udelay(40);
12894 }
12895 test_desc.flags = 0x00000005;
12896
12897 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12898 u32 val;
12899
12900 val = *(((u32 *)&test_desc) + i);
12901 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12902 sram_dma_descs + (i * sizeof(u32)));
12903 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12904 }
12905 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12906
12907 if (to_device) {
12908 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12909 } else {
12910 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12911 }
12912
12913 ret = -ENODEV;
12914 for (i = 0; i < 40; i++) {
12915 u32 val;
12916
12917 if (to_device)
12918 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12919 else
12920 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12921 if ((val & 0xffff) == sram_dma_descs) {
12922 ret = 0;
12923 break;
12924 }
12925
12926 udelay(100);
12927 }
12928
12929 return ret;
12930}
12931
David S. Millerded73402005-05-23 13:59:47 -070012932#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012933
12934static int __devinit tg3_test_dma(struct tg3 *tp)
12935{
12936 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012937 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012938 int ret;
12939
12940 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12941 if (!buf) {
12942 ret = -ENOMEM;
12943 goto out_nofree;
12944 }
12945
12946 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12947 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12948
David S. Miller59e6b432005-05-18 22:50:10 -070012949 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012950
12951 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12952 /* DMA read watermark not used on PCIE */
12953 tp->dma_rwctrl |= 0x00180000;
12954 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012957 tp->dma_rwctrl |= 0x003f0000;
12958 else
12959 tp->dma_rwctrl |= 0x003f000f;
12960 } else {
12961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12963 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080012964 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012965
Michael Chan4a29cc22006-03-19 13:21:12 -080012966 /* If the 5704 is behind the EPB bridge, we can
12967 * do the less restrictive ONE_DMA workaround for
12968 * better performance.
12969 */
12970 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12972 tp->dma_rwctrl |= 0x8000;
12973 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012974 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12975
Michael Chan49afdeb2007-02-13 12:17:03 -080012976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12977 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070012978 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080012979 tp->dma_rwctrl |=
12980 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12981 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12982 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070012983 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12984 /* 5780 always in PCIX mode */
12985 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070012986 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12987 /* 5714 always in PCIX mode */
12988 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012989 } else {
12990 tp->dma_rwctrl |= 0x001b000f;
12991 }
12992 }
12993
12994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12996 tp->dma_rwctrl &= 0xfffffff0;
12997
12998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13000 /* Remove this if it causes problems for some boards. */
13001 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13002
13003 /* On 5700/5701 chips, we need to set this bit.
13004 * Otherwise the chip will issue cacheline transactions
13005 * to streamable DMA memory with not all the byte
13006 * enables turned on. This is an error on several
13007 * RISC PCI controllers, in particular sparc64.
13008 *
13009 * On 5703/5704 chips, this bit has been reassigned
13010 * a different meaning. In particular, it is used
13011 * on those chips to enable a PCI-X workaround.
13012 */
13013 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13014 }
13015
13016 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13017
13018#if 0
13019 /* Unneeded, already done by tg3_get_invariants. */
13020 tg3_switch_clocks(tp);
13021#endif
13022
13023 ret = 0;
13024 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13025 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13026 goto out;
13027
David S. Miller59e6b432005-05-18 22:50:10 -070013028	/* It is best to perform the DMA test with the maximum write burst size
13029 * to expose the 5700/5701 write DMA bug.
13030 */
13031 saved_dma_rwctrl = tp->dma_rwctrl;
13032 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13033 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13034
Linus Torvalds1da177e2005-04-16 15:20:36 -070013035 while (1) {
13036 u32 *p = buf, i;
13037
13038 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13039 p[i] = i;
13040
13041 /* Send the buffer to the chip. */
13042 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13043 if (ret) {
13044 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13045 break;
13046 }
13047
13048#if 0
13049 /* validate data reached card RAM correctly. */
13050 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13051 u32 val;
13052 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13053 if (le32_to_cpu(val) != p[i]) {
13054 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13055 /* ret = -ENODEV here? */
13056 }
13057 p[i] = 0;
13058 }
13059#endif
13060 /* Now read it back. */
13061 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13062 if (ret) {
13063 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13064
13065 break;
13066 }
13067
13068 /* Verify it. */
13069 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13070 if (p[i] == i)
13071 continue;
13072
David S. Miller59e6b432005-05-18 22:50:10 -070013073 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13074 DMA_RWCTRL_WRITE_BNDRY_16) {
13075 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013076 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13077 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13078 break;
13079 } else {
13080 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13081 ret = -ENODEV;
13082 goto out;
13083 }
13084 }
13085
13086 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13087 /* Success. */
13088 ret = 0;
13089 break;
13090 }
13091 }
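	/* Retry strategy of the loop above: on the first data mismatch the
	 * write boundary is tightened to 16 bytes and the whole write/read
	 * cycle is repeated; a mismatch that persists at the 16-byte
	 * setting is treated as a hard failure (-ENODEV).
	 */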
David S. Miller59e6b432005-05-18 22:50:10 -070013092 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13093 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013094 static struct pci_device_id dma_wait_state_chipsets[] = {
13095 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13096 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13097 { },
13098 };
13099
David S. Miller59e6b432005-05-18 22:50:10 -070013100		/* DMA test passed without adjusting DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013101 * now look for chipsets that are known to expose the
13102 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013103 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013104 if (pci_dev_present(dma_wait_state_chipsets)) {
13105 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13106 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13107 }
13108 else
13109 /* Safe to use the calculated DMA boundary. */
13110 tp->dma_rwctrl = saved_dma_rwctrl;
13111
David S. Miller59e6b432005-05-18 22:50:10 -070013112 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013114
13115out:
13116 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13117out_nofree:
13118 return ret;
13119}
13120
13121static void __devinit tg3_init_link_config(struct tg3 *tp)
13122{
13123 tp->link_config.advertising =
13124 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13125 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13126 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13127 ADVERTISED_Autoneg | ADVERTISED_MII);
13128 tp->link_config.speed = SPEED_INVALID;
13129 tp->link_config.duplex = DUPLEX_INVALID;
13130 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013131 tp->link_config.active_speed = SPEED_INVALID;
13132 tp->link_config.active_duplex = DUPLEX_INVALID;
13133 tp->link_config.phy_is_low_power = 0;
13134 tp->link_config.orig_speed = SPEED_INVALID;
13135 tp->link_config.orig_duplex = DUPLEX_INVALID;
13136 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13137}
13138
13139static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13140{
Michael Chanfdfec172005-07-25 12:31:48 -070013141 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13142 tp->bufmgr_config.mbuf_read_dma_low_water =
13143 DEFAULT_MB_RDMA_LOW_WATER_5705;
13144 tp->bufmgr_config.mbuf_mac_rx_low_water =
13145 DEFAULT_MB_MACRX_LOW_WATER_5705;
13146 tp->bufmgr_config.mbuf_high_water =
13147 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13149 tp->bufmgr_config.mbuf_mac_rx_low_water =
13150 DEFAULT_MB_MACRX_LOW_WATER_5906;
13151 tp->bufmgr_config.mbuf_high_water =
13152 DEFAULT_MB_HIGH_WATER_5906;
13153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013154
Michael Chanfdfec172005-07-25 12:31:48 -070013155 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13156 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13157 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13158 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13159 tp->bufmgr_config.mbuf_high_water_jumbo =
13160 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13161 } else {
13162 tp->bufmgr_config.mbuf_read_dma_low_water =
13163 DEFAULT_MB_RDMA_LOW_WATER;
13164 tp->bufmgr_config.mbuf_mac_rx_low_water =
13165 DEFAULT_MB_MACRX_LOW_WATER;
13166 tp->bufmgr_config.mbuf_high_water =
13167 DEFAULT_MB_HIGH_WATER;
13168
13169 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13170 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13171 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13172 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13173 tp->bufmgr_config.mbuf_high_water_jumbo =
13174 DEFAULT_MB_HIGH_WATER_JUMBO;
13175 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013176
13177 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13178 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13179}
13180
13181static char * __devinit tg3_phy_string(struct tg3 *tp)
13182{
13183 switch (tp->phy_id & PHY_ID_MASK) {
13184 case PHY_ID_BCM5400: return "5400";
13185 case PHY_ID_BCM5401: return "5401";
13186 case PHY_ID_BCM5411: return "5411";
13187 case PHY_ID_BCM5701: return "5701";
13188 case PHY_ID_BCM5703: return "5703";
13189 case PHY_ID_BCM5704: return "5704";
13190 case PHY_ID_BCM5705: return "5705";
13191 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013192 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013193 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013194 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013195 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013196 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013197 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013198 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013199 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013200 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013201 case PHY_ID_BCM8002: return "8002/serdes";
13202 case 0: return "serdes";
13203 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013205}
13206
Michael Chanf9804dd2005-09-27 12:13:10 -070013207static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13208{
13209 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13210 strcpy(str, "PCI Express");
13211 return str;
13212 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13213 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13214
13215 strcpy(str, "PCIX:");
13216
13217 if ((clock_ctrl == 7) ||
13218 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13219 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13220 strcat(str, "133MHz");
13221 else if (clock_ctrl == 0)
13222 strcat(str, "33MHz");
13223 else if (clock_ctrl == 2)
13224 strcat(str, "50MHz");
13225 else if (clock_ctrl == 4)
13226 strcat(str, "66MHz");
13227 else if (clock_ctrl == 6)
13228 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013229 } else {
13230 strcpy(str, "PCI:");
13231 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13232 strcat(str, "66MHz");
13233 else
13234 strcat(str, "33MHz");
13235 }
13236 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13237 strcat(str, ":32-bit");
13238 else
13239 strcat(str, ":64-bit");
13240 return str;
13241}
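/* Example outputs, for illustration: "PCI Express", "PCIX:133MHz:64-bit",
 * "PCI:33MHz:32-bit".  The caller in tg3_init_one() passes a 40-byte
 * scratch buffer (str[40]), which comfortably fits the longest form.
 */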
13242
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013243static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013244{
13245 struct pci_dev *peer;
13246 unsigned int func, devnr = tp->pdev->devfn & ~7;
13247
13248 for (func = 0; func < 8; func++) {
13249 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13250 if (peer && peer != tp->pdev)
13251 break;
13252 pci_dev_put(peer);
13253 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013254 /* 5704 can be configured in single-port mode, set peer to
13255 * tp->pdev in that case.
13256 */
13257 if (!peer) {
13258 peer = tp->pdev;
13259 return peer;
13260 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013261
13262 /*
13263 * We don't need to keep the refcount elevated; there's no way
13264 * to remove one half of this device without removing the other
13265 */
13266 pci_dev_put(peer);
13267
13268 return peer;
13269}
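/* Example (hypothetical topology): a dual-port 5704 exposing functions 0
 * and 1 of the same slot.  devfn & ~7 masks off the function bits, the
 * loop scans functions 0-7 of that slot and returns the sibling device;
 * in a single-port configuration the device is returned as its own peer.
 */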
13270
David S. Miller15f98502005-05-18 22:49:26 -070013271static void __devinit tg3_init_coal(struct tg3 *tp)
13272{
13273 struct ethtool_coalesce *ec = &tp->coal;
13274
13275 memset(ec, 0, sizeof(*ec));
13276 ec->cmd = ETHTOOL_GCOALESCE;
13277 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13278 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13279 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13280 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13281 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13282 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13283 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13284 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13285 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13286
13287 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13288 HOSTCC_MODE_CLRTICK_TXBD)) {
13289 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13290 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13291 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13292 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13293 }
Michael Chand244c892005-07-05 14:42:33 -070013294
13295 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13296 ec->rx_coalesce_usecs_irq = 0;
13297 ec->tx_coalesce_usecs_irq = 0;
13298 ec->stats_block_coalesce_usecs = 0;
13299 }
David S. Miller15f98502005-05-18 22:49:26 -070013300}
13301
Linus Torvalds1da177e2005-04-16 15:20:36 -070013302static int __devinit tg3_init_one(struct pci_dev *pdev,
13303 const struct pci_device_id *ent)
13304{
13305 static int tg3_version_printed = 0;
Matt Carlson63532392008-11-03 16:49:57 -080013306 resource_size_t tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013307 struct net_device *dev;
13308 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013309 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013310 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013311 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013312
13313 if (tg3_version_printed++ == 0)
13314 printk(KERN_INFO "%s", version);
13315
13316 err = pci_enable_device(pdev);
13317 if (err) {
13318 printk(KERN_ERR PFX "Cannot enable PCI device, "
13319 "aborting.\n");
13320 return err;
13321 }
13322
Matt Carlson63532392008-11-03 16:49:57 -080013323 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013324 printk(KERN_ERR PFX "Cannot find proper PCI device "
13325 "base address, aborting.\n");
13326 err = -ENODEV;
13327 goto err_out_disable_pdev;
13328 }
13329
13330 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13331 if (err) {
13332 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13333 "aborting.\n");
13334 goto err_out_disable_pdev;
13335 }
13336
13337 pci_set_master(pdev);
13338
13339 /* Find power-management capability. */
13340 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13341 if (pm_cap == 0) {
13342 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13343 "aborting.\n");
13344 err = -EIO;
13345 goto err_out_free_res;
13346 }
13347
Linus Torvalds1da177e2005-04-16 15:20:36 -070013348 dev = alloc_etherdev(sizeof(*tp));
13349 if (!dev) {
13350 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13351 err = -ENOMEM;
13352 goto err_out_free_res;
13353 }
13354
Linus Torvalds1da177e2005-04-16 15:20:36 -070013355 SET_NETDEV_DEV(dev, &pdev->dev);
13356
Linus Torvalds1da177e2005-04-16 15:20:36 -070013357#if TG3_VLAN_TAG_USED
13358 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13359 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013360#endif
13361
13362 tp = netdev_priv(dev);
13363 tp->pdev = pdev;
13364 tp->dev = dev;
13365 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013366 tp->rx_mode = TG3_DEF_RX_MODE;
13367 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013368
Linus Torvalds1da177e2005-04-16 15:20:36 -070013369 if (tg3_debug > 0)
13370 tp->msg_enable = tg3_debug;
13371 else
13372 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13373
13374 /* The word/byte swap controls here control register access byte
13375 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13376 * setting below.
13377 */
13378 tp->misc_host_ctrl =
13379 MISC_HOST_CTRL_MASK_PCI_INT |
13380 MISC_HOST_CTRL_WORD_SWAP |
13381 MISC_HOST_CTRL_INDIR_ACCESS |
13382 MISC_HOST_CTRL_PCISTATE_RW;
13383
13384 /* The NONFRM (non-frame) byte/word swap controls take effect
13385 * on descriptor entries, anything which isn't packet data.
13386 *
13387 * The StrongARM chips on the board (one for tx, one for rx)
13388 * are running in big-endian mode.
13389 */
13390 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13391 GRC_MODE_WSWAP_NONFRM_DATA);
13392#ifdef __BIG_ENDIAN
13393 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13394#endif
13395 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013396 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013397 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013398
Matt Carlson63532392008-11-03 16:49:57 -080013399 dev->mem_start = pci_resource_start(pdev, BAR_0);
13400 tg3reg_len = pci_resource_len(pdev, BAR_0);
13401 dev->mem_end = dev->mem_start + tg3reg_len;
13402
13403 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013404 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013405 printk(KERN_ERR PFX "Cannot map device registers, "
13406 "aborting.\n");
13407 err = -ENOMEM;
13408 goto err_out_free_dev;
13409 }
13410
13411 tg3_init_link_config(tp);
13412
Linus Torvalds1da177e2005-04-16 15:20:36 -070013413 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13414 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13415 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13416
13417 dev->open = tg3_open;
13418 dev->stop = tg3_close;
13419 dev->get_stats = tg3_get_stats;
13420 dev->set_multicast_list = tg3_set_rx_mode;
13421 dev->set_mac_address = tg3_set_mac_addr;
13422 dev->do_ioctl = tg3_ioctl;
13423 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013424 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013425 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013426 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13427 dev->change_mtu = tg3_change_mtu;
13428 dev->irq = pdev->irq;
13429#ifdef CONFIG_NET_POLL_CONTROLLER
13430 dev->poll_controller = tg3_poll_controller;
13431#endif
13432
13433 err = tg3_get_invariants(tp);
13434 if (err) {
13435 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13436 "aborting.\n");
13437 goto err_out_iounmap;
13438 }
13439
Michael Chan4a29cc22006-03-19 13:21:12 -080013440 /* The EPB bridge inside 5714, 5715, and 5780 and any
13441 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013442 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13443 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13444 * do DMA address check in tg3_start_xmit().
13445 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013446 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13447 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13448 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013449 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13450#ifdef CONFIG_HIGHMEM
13451 dma_mask = DMA_64BIT_MASK;
13452#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013453 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013454 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13455
13456 /* Configure DMA attributes. */
13457 if (dma_mask > DMA_32BIT_MASK) {
13458 err = pci_set_dma_mask(pdev, dma_mask);
13459 if (!err) {
13460 dev->features |= NETIF_F_HIGHDMA;
13461 err = pci_set_consistent_dma_mask(pdev,
13462 persist_dma_mask);
13463 if (err < 0) {
13464 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13465 "DMA for consistent allocations\n");
13466 goto err_out_iounmap;
13467 }
13468 }
13469 }
13470 if (err || dma_mask == DMA_32BIT_MASK) {
13471 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13472 if (err) {
13473 printk(KERN_ERR PFX "No usable DMA configuration, "
13474 "aborting.\n");
13475 goto err_out_iounmap;
13476 }
13477 }
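	/* For reference, the DMA mask constants used above are
	 * DMA_32BIT_MASK = 0x00000000ffffffffULL,
	 * DMA_40BIT_MASK = 0x000000ffffffffffULL and
	 * DMA_64BIT_MASK = 0xffffffffffffffffULL.  Parts with the 40-bit
	 * DMA bug (5714/5715/5780 class and devices behind the EPB) are
	 * limited to 40-bit coherent allocations, while streaming mappings
	 * may still use a 64-bit mask on CONFIG_HIGHMEM systems, with
	 * tg3_start_xmit() checking each mapping at transmit time.
	 */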
13478
Michael Chanfdfec172005-07-25 12:31:48 -070013479 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013480
Linus Torvalds1da177e2005-04-16 15:20:36 -070013481 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13482 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13483 }
13484 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13486 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013488 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13489 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13490 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013491 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013492 }
13493
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013494 /* TSO is on by default on chips that support hardware TSO.
13495 * Firmware TSO on older chips gives lower performance, so it
13496 * is off by default, but can be enabled using ethtool.
13497 */
Michael Chanb0026622006-07-03 19:42:14 -070013498 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013499 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013500 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13501 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013502 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13504 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13505 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013507 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013509
Linus Torvalds1da177e2005-04-16 15:20:36 -070013510
13511 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13512 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13513 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13514 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13515 tp->rx_pending = 63;
13516 }
13517
Linus Torvalds1da177e2005-04-16 15:20:36 -070013518 err = tg3_get_device_address(tp);
13519 if (err) {
13520 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13521 "aborting.\n");
13522 goto err_out_iounmap;
13523 }
13524
Matt Carlson0d3031d2007-10-10 18:02:43 -070013525 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
Matt Carlson63532392008-11-03 16:49:57 -080013526 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013527 printk(KERN_ERR PFX "Cannot find proper PCI device "
13528 "base address for APE, aborting.\n");
13529 err = -ENODEV;
13530 goto err_out_iounmap;
13531 }
13532
Matt Carlson63532392008-11-03 16:49:57 -080013533 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
Al Viro79ea13c2008-01-24 02:06:46 -080013534 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013535 printk(KERN_ERR PFX "Cannot map APE registers, "
13536 "aborting.\n");
13537 err = -ENOMEM;
13538 goto err_out_iounmap;
13539 }
13540
13541 tg3_ape_lock_init(tp);
13542 }
13543
Matt Carlsonc88864d2007-11-12 21:07:01 -080013544 /*
13545	 * Reset the chip in case a UNDI or EFI driver did not shut it down
13546	 * cleanly; the DMA self test will enable WDMAC and we'll see (spurious)
13547	 * pending DMA on the PCI bus at that point.
13548 */
13549 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13550 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13551 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13552 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13553 }
13554
13555 err = tg3_test_dma(tp);
13556 if (err) {
13557 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13558 goto err_out_apeunmap;
13559 }
13560
13561 /* Tigon3 can do ipv4 only... and some chips have buggy
13562 * checksumming.
13563 */
13564 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13565 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13566 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013571 dev->features |= NETIF_F_IPV6_CSUM;
13572
13573 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13574 } else
13575 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13576
13577 /* flow control autonegotiation is default behavior */
13578 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013579 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013580
13581 tg3_init_coal(tp);
13582
Michael Chanc49a1562006-12-17 17:07:29 -080013583 pci_set_drvdata(pdev, dev);
13584
Linus Torvalds1da177e2005-04-16 15:20:36 -070013585 err = register_netdev(dev);
13586 if (err) {
13587 printk(KERN_ERR PFX "Cannot register net device, "
13588 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013589 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013590 }
13591
Matt Carlsondf59c942008-11-03 16:52:56 -080013592 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013593 dev->name,
13594 tp->board_part_number,
13595 tp->pci_chip_rev_id,
Michael Chanf9804dd2005-09-27 12:13:10 -070013596 tg3_bus_string(tp, str),
Johannes Berge1749612008-10-27 15:59:26 -070013597 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013598
Matt Carlsondf59c942008-11-03 16:52:56 -080013599 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13600 printk(KERN_INFO
13601 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13602 tp->dev->name,
13603 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13604 tp->mdio_bus->phy_map[PHY_ADDR]->dev.bus_id);
13605 else
13606 printk(KERN_INFO
13607 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13608 tp->dev->name, tg3_phy_string(tp),
13609 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13610 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13611 "10/100/1000Base-T")),
13612 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13613
13614 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013615 dev->name,
13616 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13617 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13618 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13619 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013620 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013621 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13622 dev->name, tp->dma_rwctrl,
13623 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13624 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013625
13626 return 0;
13627
Matt Carlson0d3031d2007-10-10 18:02:43 -070013628err_out_apeunmap:
13629 if (tp->aperegs) {
13630 iounmap(tp->aperegs);
13631 tp->aperegs = NULL;
13632 }
13633
Linus Torvalds1da177e2005-04-16 15:20:36 -070013634err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013635 if (tp->regs) {
13636 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013637 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013638 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013639
13640err_out_free_dev:
13641 free_netdev(dev);
13642
13643err_out_free_res:
13644 pci_release_regions(pdev);
13645
13646err_out_disable_pdev:
13647 pci_disable_device(pdev);
13648 pci_set_drvdata(pdev, NULL);
13649 return err;
13650}
13651
13652static void __devexit tg3_remove_one(struct pci_dev *pdev)
13653{
13654 struct net_device *dev = pci_get_drvdata(pdev);
13655
13656 if (dev) {
13657 struct tg3 *tp = netdev_priv(dev);
13658
Michael Chan7faa0062006-02-02 17:29:28 -080013659 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013660
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013661 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13662 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013663 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013664 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013665
Linus Torvalds1da177e2005-04-16 15:20:36 -070013666 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013667 if (tp->aperegs) {
13668 iounmap(tp->aperegs);
13669 tp->aperegs = NULL;
13670 }
Michael Chan68929142005-08-09 20:17:14 -070013671 if (tp->regs) {
13672 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013673 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013674 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013675 free_netdev(dev);
13676 pci_release_regions(pdev);
13677 pci_disable_device(pdev);
13678 pci_set_drvdata(pdev, NULL);
13679 }
13680}
13681
13682static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13683{
13684 struct net_device *dev = pci_get_drvdata(pdev);
13685 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013686 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013687 int err;
13688
Michael Chan3e0c95f2007-08-03 20:56:54 -070013689 /* PCI register 4 needs to be saved whether netif_running() or not.
13690 * MSI address and data need to be saved if using MSI and
13691 * netif_running().
13692 */
13693 pci_save_state(pdev);
13694
Linus Torvalds1da177e2005-04-16 15:20:36 -070013695 if (!netif_running(dev))
13696 return 0;
13697
Michael Chan7faa0062006-02-02 17:29:28 -080013698 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013699 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013700 tg3_netif_stop(tp);
13701
13702 del_timer_sync(&tp->timer);
13703
David S. Millerf47c11e2005-06-24 20:18:35 -070013704 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013705 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013706 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013707
13708 netif_device_detach(dev);
13709
David S. Millerf47c11e2005-06-24 20:18:35 -070013710 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013711 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013712 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013713 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013714
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013715 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13716
13717 err = tg3_set_power_state(tp, target_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013718 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013719 int err2;
13720
David S. Millerf47c11e2005-06-24 20:18:35 -070013721 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013722
Michael Chan6a9eba12005-12-13 21:08:58 -080013723 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013724 err2 = tg3_restart_hw(tp, 1);
13725 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013726 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013727
13728 tp->timer.expires = jiffies + tp->timer_offset;
13729 add_timer(&tp->timer);
13730
13731 netif_device_attach(dev);
13732 tg3_netif_start(tp);
13733
Michael Chanb9ec6c12006-07-25 16:37:27 -070013734out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013735 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013736
13737 if (!err2)
13738 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013739 }
13740
13741 return err;
13742}
13743
13744static int tg3_resume(struct pci_dev *pdev)
13745{
13746 struct net_device *dev = pci_get_drvdata(pdev);
13747 struct tg3 *tp = netdev_priv(dev);
13748 int err;
13749
Michael Chan3e0c95f2007-08-03 20:56:54 -070013750 pci_restore_state(tp->pdev);
13751
Linus Torvalds1da177e2005-04-16 15:20:36 -070013752 if (!netif_running(dev))
13753 return 0;
13754
Michael Chanbc1c7562006-03-20 17:48:03 -080013755 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013756 if (err)
13757 return err;
13758
13759 netif_device_attach(dev);
13760
David S. Millerf47c11e2005-06-24 20:18:35 -070013761 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013762
Michael Chan6a9eba12005-12-13 21:08:58 -080013763 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013764 err = tg3_restart_hw(tp, 1);
13765 if (err)
13766 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013767
13768 tp->timer.expires = jiffies + tp->timer_offset;
13769 add_timer(&tp->timer);
13770
Linus Torvalds1da177e2005-04-16 15:20:36 -070013771 tg3_netif_start(tp);
13772
Michael Chanb9ec6c12006-07-25 16:37:27 -070013773out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013774 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013775
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013776 if (!err)
13777 tg3_phy_start(tp);
13778
Michael Chanb9ec6c12006-07-25 16:37:27 -070013779 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013780}
13781
13782static struct pci_driver tg3_driver = {
13783 .name = DRV_MODULE_NAME,
13784 .id_table = tg3_pci_tbl,
13785 .probe = tg3_init_one,
13786 .remove = __devexit_p(tg3_remove_one),
13787 .suspend = tg3_suspend,
13788 .resume = tg3_resume
13789};
13790
13791static int __init tg3_init(void)
13792{
Jeff Garzik29917622006-08-19 17:48:59 -040013793 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013794}
13795
13796static void __exit tg3_cleanup(void)
13797{
13798 pci_unregister_driver(&tg3_driver);
13799}
13800
13801module_init(tg3_init);
13802module_exit(tg3_cleanup);