/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.94"
#define DRV_MODULE_RELDATE	"August 14, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
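
/* For illustration only: NEXT_TX is the concrete form of the '& (foo - 1)'
 * trick described above.  TG3_TX_RING_SIZE is a power of two (512), so
 * masking with 511 behaves exactly like '% 512' without a hardware divide;
 * e.g. NEXT_TX(510) == 511 and NEXT_TX(511) == 0, wrapping back to the
 * start of the ring.
 */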

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
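
/* Sketch of how the accessors above are typically used (no new driver logic):
 * ordinary register traffic goes through tw32()/tr32(), writes that must be
 * flushed to the chip before continuing use tw32_f(), and writes that also
 * need a settling delay use tw32_wait_f(), e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as done in tg3_switch_clocks() below.
 */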

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
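
/* Hypothetical caller, shown only to illustrate how the APE lock helpers are
 * meant to pair up: acquire the lock, touch the resource shared with the APE
 * firmware, then release it.
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... access the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */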

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = (struct tg3 *)bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		return i;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	switch (phydev->phy_id) {
	case TG3_PHY_ID_BCM50610:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		break;
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tg3_mdio_config(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}
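
/* Rough lifecycle of the mdio glue above, assuming TG3_FLG3_USE_PHYLIB is
 * set: tg3_mdio_init() allocates and registers the bus (its read/write/reset
 * hooks are backed by tg3_readphy()/tg3_writephy()), tg3_mdio_stop() and
 * tg3_mdio_start() pause and resume it around chip resets via the
 * TG3_FLG3_MDIOBUS_PAUSED flag, and tg3_mdio_fini() unregisters and frees it.
 */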

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
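
/* Worked example of the computation above (numbers only, no new logic): if
 * roughly 1000 usec of the 2500 usec firmware-event window remain, then
 * delay_cnt = (1000 >> 3) + 1 = 126 polls of 8 usec each, so the loop never
 * busy-waits much longer than the time actually remaining.
 */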

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}
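
/* The two resolve helpers above follow the usual 802.3 pause resolution; a
 * condensed truth table for the 1000T case (the 1000X case is analogous):
 *
 *	local CAP  local ASYM  remote CAP  remote ASYM	resolved flow control
 *	    1          -           1           -	TX and RX
 *	    1          1           0           1	RX only
 *	    0          1           1           1	TX only
 *	    (any other combination)			none
 */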

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	printk(KERN_INFO
	       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
	       tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
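
/* tg3_phydsp_write() is a small indirection helper: the DSP register number
 * goes into MII_TG3_DSP_ADDRESS and the value into MII_TG3_DSP_RW_PORT, so a
 * call such as tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy), as used by
 * tg3_phy_apply_otp() below, is two MII writes under the hood.
 */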
1410
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001411static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1412{
1413 u32 phy;
1414
1415 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1416 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1417 return;
1418
1419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1420 u32 ephy;
1421
1422 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1423 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1424 ephy | MII_TG3_EPHY_SHADOW_EN);
1425 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1426 if (enable)
1427 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1428 else
1429 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1430 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1431 }
1432 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1433 }
1434 } else {
1435 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1436 MII_TG3_AUXCTL_SHDWSEL_MISC;
1437 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1438 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1439 if (enable)
1440 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1441 else
1442 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1443 phy |= MII_TG3_AUXCTL_MISC_WREN;
1444 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1445 }
1446 }
1447}
1448
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449static void tg3_phy_set_wirespeed(struct tg3 *tp)
1450{
1451 u32 val;
1452
1453 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1454 return;
1455
1456 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1457 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1458 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1459 (val | (1 << 15) | (1 << 4)));
1460}
1461
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001462static void tg3_phy_apply_otp(struct tg3 *tp)
1463{
1464 u32 otp, phy;
1465
1466 if (!tp->phy_otp)
1467 return;
1468
1469 otp = tp->phy_otp;
1470
1471 /* Enable SM_DSP clock and tx 6dB coding. */
1472 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1473 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1474 MII_TG3_AUXCTL_ACTL_TX_6DB;
1475 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1476
1477 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1478 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1479 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1480
1481 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1482 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1483 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1484
1485 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1486 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1487 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1488
1489 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1490 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1491
1492 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1493 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1494
1495 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1496 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1497 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1498
1499 /* Turn off SM_DSP clock. */
1500 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1501 MII_TG3_AUXCTL_ACTL_TX_6DB;
1502 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1503}
1504
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505static int tg3_wait_macro_done(struct tg3 *tp)
1506{
1507 int limit = 100;
1508
1509 while (limit--) {
1510 u32 tmp32;
1511
1512 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1513 if ((tmp32 & 0x1000) == 0)
1514 break;
1515 }
1516 }
1517 if (limit <= 0)
1518 return -EBUSY;
1519
1520 return 0;
1521}
1522
1523static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1524{
1525 static const u32 test_pat[4][6] = {
1526 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1527 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1528 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1529 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1530 };
1531 int chan;
1532
1533 for (chan = 0; chan < 4; chan++) {
1534 int i;
1535
1536 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1537 (chan * 0x2000) | 0x0200);
1538 tg3_writephy(tp, 0x16, 0x0002);
1539
1540 for (i = 0; i < 6; i++)
1541 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1542 test_pat[chan][i]);
1543
1544 tg3_writephy(tp, 0x16, 0x0202);
1545 if (tg3_wait_macro_done(tp)) {
1546 *resetp = 1;
1547 return -EBUSY;
1548 }
1549
1550 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1551 (chan * 0x2000) | 0x0200);
1552 tg3_writephy(tp, 0x16, 0x0082);
1553 if (tg3_wait_macro_done(tp)) {
1554 *resetp = 1;
1555 return -EBUSY;
1556 }
1557
1558 tg3_writephy(tp, 0x16, 0x0802);
1559 if (tg3_wait_macro_done(tp)) {
1560 *resetp = 1;
1561 return -EBUSY;
1562 }
1563
1564 for (i = 0; i < 6; i += 2) {
1565 u32 low, high;
1566
1567 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1568 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1569 tg3_wait_macro_done(tp)) {
1570 *resetp = 1;
1571 return -EBUSY;
1572 }
1573 low &= 0x7fff;
1574 high &= 0x000f;
1575 if (low != test_pat[chan][i] ||
1576 high != test_pat[chan][i+1]) {
1577 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1578 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1579 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1580
1581 return -EBUSY;
1582 }
1583 }
1584 }
1585
1586 return 0;
1587}
1588
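/* Clear the test pattern out of all four PHY DSP channels. */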
1589static int tg3_phy_reset_chanpat(struct tg3 *tp)
1590{
1591 int chan;
1592
1593 for (chan = 0; chan < 4; chan++) {
1594 int i;
1595
1596 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1597 (chan * 0x2000) | 0x0200);
1598 tg3_writephy(tp, 0x16, 0x0002);
1599 for (i = 0; i < 6; i++)
1600 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1601 tg3_writephy(tp, 0x16, 0x0202);
1602 if (tg3_wait_macro_done(tp))
1603 return -EBUSY;
1604 }
1605
1606 return 0;
1607}
1608
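/* PHY reset workaround for 5703/5704/5705: reset the PHY, force 1000 Mbps
 * full-duplex master mode, then write and verify the DSP channel test
 * patterns, retrying the whole sequence up to 10 times.
 */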
1609static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1610{
1611 u32 reg32, phy9_orig;
1612 int retries, do_phy_reset, err;
1613
1614 retries = 10;
1615 do_phy_reset = 1;
1616 do {
1617 if (do_phy_reset) {
1618 err = tg3_bmcr_reset(tp);
1619 if (err)
1620 return err;
1621 do_phy_reset = 0;
1622 }
1623
1624 /* Disable transmitter and interrupt. */
1625 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1626 continue;
1627
1628 reg32 |= 0x3000;
1629 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1630
1631 /* Set full-duplex, 1000 mbps. */
1632 tg3_writephy(tp, MII_BMCR,
1633 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1634
1635 /* Set to master mode. */
1636 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1637 continue;
1638
1639 tg3_writephy(tp, MII_TG3_CTRL,
1640 (MII_TG3_CTRL_AS_MASTER |
1641 MII_TG3_CTRL_ENABLE_AS_MASTER));
1642
1643 /* Enable SM_DSP_CLOCK and 6dB. */
1644 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1645
1646 /* Block the PHY control access. */
1647 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1648 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1649
1650 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1651 if (!err)
1652 break;
1653 } while (--retries);
1654
1655 err = tg3_phy_reset_chanpat(tp);
1656 if (err)
1657 return err;
1658
1659 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1660 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1661
1662 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1663 tg3_writephy(tp, 0x16, 0x0000);
1664
1665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1667 /* Set Extended packet length bit for jumbo frames */
1668 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1669 }
1670 else {
1671 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1672 }
1673
1674 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1675
1676 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1677 reg32 &= ~0x3000;
1678 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1679 } else if (!err)
1680 err = -EBUSY;
1681
1682 return err;
1683}
1684
1685/* This will reset the tigon3 PHY if there is no valid
1686 * link unless the FORCE argument is non-zero.
1687 */
1688static int tg3_phy_reset(struct tg3 *tp)
1689{
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001690 u32 cpmuctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 u32 phy_status;
1692 int err;
1693
Michael Chan60189dd2006-12-17 17:08:07 -08001694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1695 u32 val;
1696
1697 val = tr32(GRC_MISC_CFG);
1698 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1699 udelay(40);
1700 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1702 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1703 if (err != 0)
1704 return -EBUSY;
1705
Michael Chanc8e1e822006-04-29 18:55:17 -07001706 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1707 netif_carrier_off(tp->dev);
1708 tg3_link_report(tp);
1709 }
1710
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1713 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1714 err = tg3_phy_reset_5703_4_5(tp);
1715 if (err)
1716 return err;
1717 goto out;
1718 }
1719
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001720 cpmuctrl = 0;
1721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1722 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1723 cpmuctrl = tr32(TG3_CPMU_CTRL);
1724 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1725 tw32(TG3_CPMU_CTRL,
1726 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1727 }
1728
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 err = tg3_bmcr_reset(tp);
1730 if (err)
1731 return err;
1732
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001733 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1734 u32 phy;
1735
1736 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1737 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1738
1739 tw32(TG3_CPMU_CTRL, cpmuctrl);
1740 }
1741
Matt Carlsonb5af7122007-11-12 21:22:02 -08001742 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001743 u32 val;
1744
1745 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1746 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1747 CPMU_LSPD_1000MB_MACCLK_12_5) {
1748 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1749 udelay(40);
1750 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1751 }
Matt Carlson662f38d2007-11-12 21:16:17 -08001752
1753 /* Disable GPHY autopowerdown. */
1754 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1755 MII_TG3_MISC_SHDW_WREN |
1756 MII_TG3_MISC_SHDW_APD_SEL |
1757 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
Matt Carlsonce057f02007-11-12 21:08:03 -08001758 }
1759
Matt Carlsonb2a5c192008-04-03 21:44:44 -07001760 tg3_phy_apply_otp(tp);
1761
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762out:
1763 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1764 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1765 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1766 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1767 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1768 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1769 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1770 }
1771 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1772 tg3_writephy(tp, 0x1c, 0x8d68);
1773 tg3_writephy(tp, 0x1c, 0x8d68);
1774 }
1775 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1776 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1777 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1778 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1779 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1780 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1781 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1782 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1783 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1784 }
Michael Chanc424cb22006-04-29 18:56:34 -07001785 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1786 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1787 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
Michael Chanc1d2a192007-01-08 19:57:20 -08001788 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1789 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1790 tg3_writephy(tp, MII_TG3_TEST1,
1791 MII_TG3_TEST1_TRIM_EN | 0x4);
1792 } else
1793 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
Michael Chanc424cb22006-04-29 18:56:34 -07001794 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1795 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 /* Set Extended packet length bit (bit 14) on all chips that */
1797 /* support jumbo frames */
1798 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1799 /* Cannot do read-modify-write on 5401 */
1800 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
Michael Chan0f893dc2005-07-25 12:30:38 -07001801 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 u32 phy_reg;
1803
1804 /* Set bit 14 with read-modify-write to preserve other bits */
1805 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1806 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1807 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1808 }
1809
1810	/* Set PHY register 0x10 bit 0 (high FIFO elasticity) to support
1811	 * jumbo frame transmission.
1812 */
Michael Chan0f893dc2005-07-25 12:30:38 -07001813 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 u32 phy_reg;
1815
1816 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1818 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1819 }
1820
Michael Chan715116a2006-09-27 16:09:25 -07001821 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan715116a2006-09-27 16:09:25 -07001822 /* adjust output voltage */
1823 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
Michael Chan715116a2006-09-27 16:09:25 -07001824 }
1825
Matt Carlson9ef8ca92007-07-11 19:48:29 -07001826 tg3_phy_toggle_automdix(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 tg3_phy_set_wirespeed(tp);
1828 return 0;
1829}
1830
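/* Configure the GRC local-control GPIOs that select auxiliary (Vaux)
 * power.  On dual-port 5704/5714 boards the peer device's WOL/ASF state
 * is consulted as well before the GPIOs are changed.
 */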
1831static void tg3_frob_aux_power(struct tg3 *tp)
1832{
1833 struct tg3 *tp_peer = tp;
1834
Michael Chan9d26e212006-12-07 00:21:14 -08001835 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 return;
1837
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001838 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1839 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1840 struct net_device *dev_peer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001842 dev_peer = pci_get_drvdata(tp->pdev_peer);
Michael Chanbc1c7562006-03-20 17:48:03 -08001843 /* remove_one() may have been run on the peer. */
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001844 if (!dev_peer)
Michael Chanbc1c7562006-03-20 17:48:03 -08001845 tp_peer = tp;
1846 else
1847 tp_peer = netdev_priv(dev_peer);
Michael Chan8c2dc7e2005-12-19 16:26:02 -08001848 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
1850 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
Michael Chan6921d202005-12-13 21:15:53 -08001851 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1852 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1853 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
Michael Chanb401e9e2005-12-19 16:27:04 -08001856 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1857 (GRC_LCLCTRL_GPIO_OE0 |
1858 GRC_LCLCTRL_GPIO_OE1 |
1859 GRC_LCLCTRL_GPIO_OE2 |
1860 GRC_LCLCTRL_GPIO_OUTPUT0 |
1861 GRC_LCLCTRL_GPIO_OUTPUT1),
1862 100);
Matt Carlson5f0c4a32008-06-09 15:41:12 -07001863 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1864 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1865 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1866 GRC_LCLCTRL_GPIO_OE1 |
1867 GRC_LCLCTRL_GPIO_OE2 |
1868 GRC_LCLCTRL_GPIO_OUTPUT0 |
1869 GRC_LCLCTRL_GPIO_OUTPUT1 |
1870 tp->grc_local_ctrl;
1871 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1872
1873 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1874 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1875
1876 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1877 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 } else {
1879 u32 no_gpio2;
Michael Chandc56b7d2005-12-19 16:26:28 -08001880 u32 grc_local_ctrl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882 if (tp_peer != tp &&
1883 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1884 return;
1885
Michael Chandc56b7d2005-12-19 16:26:28 -08001886 /* Workaround to prevent overdrawing Amps. */
1887 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1888 ASIC_REV_5714) {
1889 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chanb401e9e2005-12-19 16:27:04 -08001890 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1891 grc_local_ctrl, 100);
Michael Chandc56b7d2005-12-19 16:26:28 -08001892 }
1893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 /* On 5753 and variants, GPIO2 cannot be used. */
1895 no_gpio2 = tp->nic_sram_data_cfg &
1896 NIC_SRAM_DATA_CFG_NO_GPIO2;
1897
Michael Chandc56b7d2005-12-19 16:26:28 -08001898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 GRC_LCLCTRL_GPIO_OE1 |
1900 GRC_LCLCTRL_GPIO_OE2 |
1901 GRC_LCLCTRL_GPIO_OUTPUT1 |
1902 GRC_LCLCTRL_GPIO_OUTPUT2;
1903 if (no_gpio2) {
1904 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1905 GRC_LCLCTRL_GPIO_OUTPUT2);
1906 }
Michael Chanb401e9e2005-12-19 16:27:04 -08001907 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1908 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
1910 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1911
Michael Chanb401e9e2005-12-19 16:27:04 -08001912 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1913 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915 if (!no_gpio2) {
1916 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chanb401e9e2005-12-19 16:27:04 -08001917 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1918 grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 }
1920 }
1921 } else {
1922 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1923 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1924 if (tp_peer != tp &&
1925 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1926 return;
1927
Michael Chanb401e9e2005-12-19 16:27:04 -08001928 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1929 (GRC_LCLCTRL_GPIO_OE1 |
1930 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
Michael Chanb401e9e2005-12-19 16:27:04 -08001932 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1933 GRC_LCLCTRL_GPIO_OE1, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
Michael Chanb401e9e2005-12-19 16:27:04 -08001935 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1936 (GRC_LCLCTRL_GPIO_OE1 |
1937 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 }
1939 }
1940}
1941
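/* Return 1 if the 5700's MAC_MODE_LINK_POLARITY bit should be set for the
 * given link speed, based on the LED mode and PHY type.
 */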
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07001942static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1943{
1944 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1945 return 1;
1946 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1947 if (speed != SPEED_10)
1948 return 1;
1949 } else if (speed == SPEED_10)
1950 return 1;
1951
1952 return 0;
1953}
1954
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955static int tg3_setup_phy(struct tg3 *, int);
1956
1957#define RESET_KIND_SHUTDOWN 0
1958#define RESET_KIND_INIT 1
1959#define RESET_KIND_SUSPEND 2
1960
1961static void tg3_write_sig_post_reset(struct tg3 *, int);
1962static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08001963static int tg3_nvram_lock(struct tg3 *);
1964static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
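/* Put the PHY into its lowest power state, except on chips where powering
 * it down is known to cause problems.  SerDes devices and the 5906 EPHY
 * use their own power-down sequences.
 */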
Michael Chan15c3b692006-03-22 01:06:52 -08001966static void tg3_power_down_phy(struct tg3 *tp)
1967{
Matt Carlsonce057f02007-11-12 21:08:03 -08001968 u32 val;
1969
Michael Chan51297242007-02-13 12:17:57 -08001970 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1972 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1973 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1974
1975 sg_dig_ctrl |=
1976 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1977 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1978 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1979 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001980 return;
Michael Chan51297242007-02-13 12:17:57 -08001981 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001982
Michael Chan60189dd2006-12-17 17:08:07 -08001983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08001984 tg3_bmcr_reset(tp);
1985 val = tr32(GRC_MISC_CFG);
1986 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1987 udelay(40);
1988 return;
Matt Carlsondd477002008-05-25 23:45:58 -07001989 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan715116a2006-09-27 16:09:25 -07001990 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1991 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1992 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1993 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001994
Michael Chan15c3b692006-03-22 01:06:52 -08001995 /* The PHY should not be powered down on some chips because
1996 * of bugs.
1997 */
1998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2000 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2001 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2002 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08002003
Matt Carlsonb5af7122007-11-12 21:22:02 -08002004 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
Matt Carlsonce057f02007-11-12 21:08:03 -08002005 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2006 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2007 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2008 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2009 }
2010
Michael Chan15c3b692006-03-22 01:06:52 -08002011 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2012}
2013
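/* Move the device to the requested PCI power state, programming WOL,
 * MAC clocks, PHY power and auxiliary power GPIOs along the way.
 * Returns 0 on success or -EINVAL for an unsupported state.
 */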
Michael Chanbc1c7562006-03-20 17:48:03 -08002014static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015{
2016 u32 misc_host_ctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 /* Make sure register accesses (indirect or otherwise)
2019 * will function correctly.
2020 */
2021 pci_write_config_dword(tp->pdev,
2022 TG3PCI_MISC_HOST_CTRL,
2023 tp->misc_host_ctrl);
2024
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08002026 case PCI_D0:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002027 pci_enable_wake(tp->pdev, state, false);
2028 pci_set_power_state(tp->pdev, PCI_D0);
Michael Chan8c6bda12005-04-21 17:09:08 -07002029
Michael Chan9d26e212006-12-07 00:21:14 -08002030 /* Switch out of Vaux if it is a NIC */
2031 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08002032 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
2034 return 0;
2035
Michael Chanbc1c7562006-03-20 17:48:03 -08002036 case PCI_D1:
Michael Chanbc1c7562006-03-20 17:48:03 -08002037 case PCI_D2:
Michael Chanbc1c7562006-03-20 17:48:03 -08002038 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 break;
2040
2041 default:
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002042 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2043 tp->dev->name, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 return -EINVAL;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2047 tw32(TG3PCI_MISC_HOST_CTRL,
2048 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2049
Matt Carlsondd477002008-05-25 23:45:58 -07002050 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002051 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2052 !tp->link_config.phy_is_low_power) {
2053 struct phy_device *phydev;
2054 u32 advertising;
2055
Lennert Buytenhek298cf9be2008-10-08 16:29:57 -07002056 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07002057
2058 tp->link_config.phy_is_low_power = 1;
2059
2060 tp->link_config.orig_speed = phydev->speed;
2061 tp->link_config.orig_duplex = phydev->duplex;
2062 tp->link_config.orig_autoneg = phydev->autoneg;
2063 tp->link_config.orig_advertising = phydev->advertising;
2064
2065 advertising = ADVERTISED_TP |
2066 ADVERTISED_Pause |
2067 ADVERTISED_Autoneg |
2068 ADVERTISED_10baseT_Half;
2069
2070 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2071 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2072 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2073 advertising |=
2074 ADVERTISED_100baseT_Half |
2075 ADVERTISED_100baseT_Full |
2076 ADVERTISED_10baseT_Full;
2077 else
2078 advertising |= ADVERTISED_10baseT_Full;
2079 }
2080
2081 phydev->advertising = advertising;
2082
2083 phy_start_aneg(phydev);
2084 }
Matt Carlsondd477002008-05-25 23:45:58 -07002085 } else {
2086 if (tp->link_config.phy_is_low_power == 0) {
2087 tp->link_config.phy_is_low_power = 1;
2088 tp->link_config.orig_speed = tp->link_config.speed;
2089 tp->link_config.orig_duplex = tp->link_config.duplex;
2090 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
Matt Carlsondd477002008-05-25 23:45:58 -07002093 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2094 tp->link_config.speed = SPEED_10;
2095 tp->link_config.duplex = DUPLEX_HALF;
2096 tp->link_config.autoneg = AUTONEG_ENABLE;
2097 tg3_setup_phy(tp, 0);
2098 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 }
2100
Michael Chanb5d37722006-09-27 16:06:21 -07002101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2102 u32 val;
2103
2104 val = tr32(GRC_VCPU_EXT_CTRL);
2105 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2106 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08002107 int i;
2108 u32 val;
2109
2110 for (i = 0; i < 200; i++) {
2111 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2112 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2113 break;
2114 msleep(1);
2115 }
2116 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07002117 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2118 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2119 WOL_DRV_STATE_SHUTDOWN |
2120 WOL_DRV_WOL |
2121 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08002122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2124 u32 mac_mode;
2125
2126 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
Matt Carlsondd477002008-05-25 23:45:58 -07002127 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2128 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2129 udelay(40);
2130 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
Michael Chan3f7045c2006-09-27 16:02:29 -07002132 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2133 mac_mode = MAC_MODE_PORT_MODE_GMII;
2134 else
2135 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002137 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2138 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2139 ASIC_REV_5700) {
2140 u32 speed = (tp->tg3_flags &
2141 TG3_FLAG_WOL_SPEED_100MB) ?
2142 SPEED_100 : SPEED_10;
2143 if (tg3_5700_link_polarity(tp, speed))
2144 mac_mode |= MAC_MODE_LINK_POLARITY;
2145 else
2146 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2147 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 } else {
2149 mac_mode = MAC_MODE_PORT_MODE_TBI;
2150 }
2151
John W. Linvillecbf46852005-04-21 17:01:29 -07002152 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 tw32(MAC_LED_CTRL, tp->led_ctrl);
2154
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002155 if (pci_pme_capable(tp->pdev, state) &&
2156 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2158
Matt Carlson3bda1252008-08-15 14:08:22 -07002159 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2160 mac_mode |= tp->mac_mode &
2161 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2162 if (mac_mode & MAC_MODE_APE_TX_EN)
2163 mac_mode |= MAC_MODE_TDE_ENABLE;
2164 }
2165
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 tw32_f(MAC_MODE, mac_mode);
2167 udelay(100);
2168
2169 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2170 udelay(10);
2171 }
2172
2173 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2174 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2176 u32 base_val;
2177
2178 base_val = tp->pci_clock_ctrl;
2179 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2180 CLOCK_CTRL_TXCLK_DISABLE);
2181
Michael Chanb401e9e2005-12-19 16:27:04 -08002182 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2183 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08002184 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07002185 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08002186 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07002187 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07002188 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2190 u32 newbits1, newbits2;
2191
2192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2194 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2195 CLOCK_CTRL_TXCLK_DISABLE |
2196 CLOCK_CTRL_ALTCLK);
2197 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2198 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2199 newbits1 = CLOCK_CTRL_625_CORE;
2200 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2201 } else {
2202 newbits1 = CLOCK_CTRL_ALTCLK;
2203 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2204 }
2205
Michael Chanb401e9e2005-12-19 16:27:04 -08002206 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2207 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208
Michael Chanb401e9e2005-12-19 16:27:04 -08002209 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2210 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211
2212 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2213 u32 newbits3;
2214
2215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2217 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2218 CLOCK_CTRL_TXCLK_DISABLE |
2219 CLOCK_CTRL_44MHZ_CORE);
2220 } else {
2221 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2222 }
2223
Michael Chanb401e9e2005-12-19 16:27:04 -08002224 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2225 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 }
2227 }
2228
Michael Chan6921d202005-12-13 21:15:53 -08002229 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07002230 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2231 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Michael Chan3f7045c2006-09-27 16:02:29 -07002232 tg3_power_down_phy(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002233
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 tg3_frob_aux_power(tp);
2235
2236 /* Workaround for unstable PLL clock */
2237 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2238 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2239 u32 val = tr32(0x7d00);
2240
2241 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2242 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08002243 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08002244 int err;
2245
2246 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08002248 if (!err)
2249 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08002250 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 }
2252
Michael Chanbbadf502006-04-06 21:46:34 -07002253 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2254
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002255 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2256 pci_enable_wake(tp->pdev, state, true);
2257
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 /* Finally, set the new power state. */
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07002259 pci_set_power_state(tp->pdev, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 return 0;
2262}
2263
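/* Decode the speed/duplex field of the PHY AUX status register into
 * SPEED_* / DUPLEX_* values; unknown encodings map to the INVALID values
 * (except on the 5906, which only reports 10/100).
 */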
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2265{
2266 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2267 case MII_TG3_AUX_STAT_10HALF:
2268 *speed = SPEED_10;
2269 *duplex = DUPLEX_HALF;
2270 break;
2271
2272 case MII_TG3_AUX_STAT_10FULL:
2273 *speed = SPEED_10;
2274 *duplex = DUPLEX_FULL;
2275 break;
2276
2277 case MII_TG3_AUX_STAT_100HALF:
2278 *speed = SPEED_100;
2279 *duplex = DUPLEX_HALF;
2280 break;
2281
2282 case MII_TG3_AUX_STAT_100FULL:
2283 *speed = SPEED_100;
2284 *duplex = DUPLEX_FULL;
2285 break;
2286
2287 case MII_TG3_AUX_STAT_1000HALF:
2288 *speed = SPEED_1000;
2289 *duplex = DUPLEX_HALF;
2290 break;
2291
2292 case MII_TG3_AUX_STAT_1000FULL:
2293 *speed = SPEED_1000;
2294 *duplex = DUPLEX_FULL;
2295 break;
2296
2297 default:
Michael Chan715116a2006-09-27 16:09:25 -07002298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2299 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2300 SPEED_10;
2301 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2302 DUPLEX_HALF;
2303 break;
2304 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 *speed = SPEED_INVALID;
2306 *duplex = DUPLEX_INVALID;
2307 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002308 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309}
2310
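/* Program the advertisement registers and (re)start autonegotiation on
 * the copper PHY, or force the configured speed/duplex when autoneg is
 * disabled.  In low-power mode gigabit (and normally 100baseT)
 * advertisements are dropped first.
 */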
2311static void tg3_phy_copper_begin(struct tg3 *tp)
2312{
2313 u32 new_adv;
2314 int i;
2315
2316 if (tp->link_config.phy_is_low_power) {
2317 /* Entering low power mode. Disable gigabit and
2318 * 100baseT advertisements.
2319 */
2320 tg3_writephy(tp, MII_TG3_CTRL, 0);
2321
2322 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2323 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2324 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2325 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2326
2327 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2328 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2330 tp->link_config.advertising &=
2331 ~(ADVERTISED_1000baseT_Half |
2332 ADVERTISED_1000baseT_Full);
2333
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002334 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2336 new_adv |= ADVERTISE_10HALF;
2337 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2338 new_adv |= ADVERTISE_10FULL;
2339 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2340 new_adv |= ADVERTISE_100HALF;
2341 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2342 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002343
2344 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2347
2348 if (tp->link_config.advertising &
2349 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2350 new_adv = 0;
2351 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2352 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2353 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2354 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2355 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2356 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2357 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2358 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2359 MII_TG3_CTRL_ENABLE_AS_MASTER);
2360 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2361 } else {
2362 tg3_writephy(tp, MII_TG3_CTRL, 0);
2363 }
2364 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002365 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2366 new_adv |= ADVERTISE_CSMA;
2367
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 /* Asking for a specific link mode. */
2369 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2371
2372 if (tp->link_config.duplex == DUPLEX_FULL)
2373 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2374 else
2375 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2376 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2377 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2378 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2379 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 if (tp->link_config.speed == SPEED_100) {
2382 if (tp->link_config.duplex == DUPLEX_FULL)
2383 new_adv |= ADVERTISE_100FULL;
2384 else
2385 new_adv |= ADVERTISE_100HALF;
2386 } else {
2387 if (tp->link_config.duplex == DUPLEX_FULL)
2388 new_adv |= ADVERTISE_10FULL;
2389 else
2390 new_adv |= ADVERTISE_10HALF;
2391 }
2392 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002393
2394 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08002396
2397 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 }
2399
2400 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2401 tp->link_config.speed != SPEED_INVALID) {
2402 u32 bmcr, orig_bmcr;
2403
2404 tp->link_config.active_speed = tp->link_config.speed;
2405 tp->link_config.active_duplex = tp->link_config.duplex;
2406
2407 bmcr = 0;
2408 switch (tp->link_config.speed) {
2409 default:
2410 case SPEED_10:
2411 break;
2412
2413 case SPEED_100:
2414 bmcr |= BMCR_SPEED100;
2415 break;
2416
2417 case SPEED_1000:
2418 bmcr |= TG3_BMCR_SPEED1000;
2419 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07002420 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
2422 if (tp->link_config.duplex == DUPLEX_FULL)
2423 bmcr |= BMCR_FULLDPLX;
2424
2425 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2426 (bmcr != orig_bmcr)) {
2427 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2428 for (i = 0; i < 1500; i++) {
2429 u32 tmp;
2430
2431 udelay(10);
2432 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2433 tg3_readphy(tp, MII_BMSR, &tmp))
2434 continue;
2435 if (!(tmp & BMSR_LSTATUS)) {
2436 udelay(40);
2437 break;
2438 }
2439 }
2440 tg3_writephy(tp, MII_BMCR, bmcr);
2441 udelay(40);
2442 }
2443 } else {
2444 tg3_writephy(tp, MII_BMCR,
2445 BMCR_ANENABLE | BMCR_ANRESTART);
2446 }
2447}
2448
2449static int tg3_init_5401phy_dsp(struct tg3 *tp)
2450{
2451 int err;
2452
2453 /* Turn off tap power management. */
2454 /* Set Extended packet length bit */
2455 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2456
2457 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2458 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2459
2460 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2461 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2462
2463 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2464 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2465
2466 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2467 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2468
2469 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2470 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2471
2472 udelay(40);
2473
2474 return err;
2475}
2476
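/* Return 1 if the PHY advertisement registers already contain every
 * 10/100/1000 mode requested in @mask, 0 otherwise.
 */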
Michael Chan3600d912006-12-07 00:21:48 -08002477static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478{
Michael Chan3600d912006-12-07 00:21:48 -08002479 u32 adv_reg, all_mask = 0;
2480
2481 if (mask & ADVERTISED_10baseT_Half)
2482 all_mask |= ADVERTISE_10HALF;
2483 if (mask & ADVERTISED_10baseT_Full)
2484 all_mask |= ADVERTISE_10FULL;
2485 if (mask & ADVERTISED_100baseT_Half)
2486 all_mask |= ADVERTISE_100HALF;
2487 if (mask & ADVERTISED_100baseT_Full)
2488 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489
2490 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2491 return 0;
2492
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 if ((adv_reg & all_mask) != all_mask)
2494 return 0;
2495 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2496 u32 tg3_ctrl;
2497
Michael Chan3600d912006-12-07 00:21:48 -08002498 all_mask = 0;
2499 if (mask & ADVERTISED_1000baseT_Half)
2500 all_mask |= ADVERTISE_1000HALF;
2501 if (mask & ADVERTISED_1000baseT_Full)
2502 all_mask |= ADVERTISE_1000FULL;
2503
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2505 return 0;
2506
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 if ((tg3_ctrl & all_mask) != all_mask)
2508 return 0;
2509 }
2510 return 1;
2511}
2512
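/* Verify that the advertised PAUSE bits match the requested flow control.
 * Returns 0 (forcing renegotiation) only when the link is full duplex and
 * the advertisement is wrong; otherwise the advertisement register is
 * quietly corrected for the next negotiation cycle.
 */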
Matt Carlsonef167e22007-12-20 20:10:01 -08002513static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2514{
2515 u32 curadv, reqadv;
2516
2517 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2518 return 1;
2519
2520 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2521 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2522
2523 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2524 if (curadv != reqadv)
2525 return 0;
2526
2527 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2528 tg3_readphy(tp, MII_LPA, rmtadv);
2529 } else {
2530 /* Reprogram the advertisement register, even if it
2531 * does not affect the current link. If the link
2532 * gets renegotiated in the future, we can save an
2533 * additional renegotiation cycle by advertising
2534 * it correctly in the first place.
2535 */
2536 if (curadv != reqadv) {
2537 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2538 ADVERTISE_PAUSE_ASYM);
2539 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2540 }
2541 }
2542
2543 return 1;
2544}
2545
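/* Bring up (or re-check) the link on a copper PHY: apply chip-specific
 * workarounds, poll BMSR and AUX status for speed and duplex, configure
 * flow control and the MAC mode, and report carrier changes.
 */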
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2547{
2548 int current_link_up;
2549 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002550 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 u16 current_speed;
2552 u8 current_duplex;
2553 int i, err;
2554
2555 tw32(MAC_EVENT, 0);
2556
2557 tw32_f(MAC_STATUS,
2558 (MAC_STATUS_SYNC_CHANGED |
2559 MAC_STATUS_CFG_CHANGED |
2560 MAC_STATUS_MI_COMPLETION |
2561 MAC_STATUS_LNKSTATE_CHANGED));
2562 udelay(40);
2563
Matt Carlson8ef21422008-05-02 16:47:53 -07002564 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2565 tw32_f(MAC_MI_MODE,
2566 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2567 udelay(80);
2568 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
2570 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2571
2572 /* Some third-party PHYs need to be reset on link going
2573 * down.
2574 */
2575 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2578 netif_carrier_ok(tp->dev)) {
2579 tg3_readphy(tp, MII_BMSR, &bmsr);
2580 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2581 !(bmsr & BMSR_LSTATUS))
2582 force_reset = 1;
2583 }
2584 if (force_reset)
2585 tg3_phy_reset(tp);
2586
2587 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2588 tg3_readphy(tp, MII_BMSR, &bmsr);
2589 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2590 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2591 bmsr = 0;
2592
2593 if (!(bmsr & BMSR_LSTATUS)) {
2594 err = tg3_init_5401phy_dsp(tp);
2595 if (err)
2596 return err;
2597
2598 tg3_readphy(tp, MII_BMSR, &bmsr);
2599 for (i = 0; i < 1000; i++) {
2600 udelay(10);
2601 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2602 (bmsr & BMSR_LSTATUS)) {
2603 udelay(40);
2604 break;
2605 }
2606 }
2607
2608 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2609 !(bmsr & BMSR_LSTATUS) &&
2610 tp->link_config.active_speed == SPEED_1000) {
2611 err = tg3_phy_reset(tp);
2612 if (!err)
2613 err = tg3_init_5401phy_dsp(tp);
2614 if (err)
2615 return err;
2616 }
2617 }
2618 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2619 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2620 /* 5701 {A0,B0} CRC bug workaround */
2621 tg3_writephy(tp, 0x15, 0x0a75);
2622 tg3_writephy(tp, 0x1c, 0x8c68);
2623 tg3_writephy(tp, 0x1c, 0x8d68);
2624 tg3_writephy(tp, 0x1c, 0x8c68);
2625 }
2626
2627 /* Clear pending interrupts... */
2628 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2629 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2630
2631 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2632 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002633 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2635
2636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2638 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2639 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2640 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2641 else
2642 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2643 }
2644
2645 current_link_up = 0;
2646 current_speed = SPEED_INVALID;
2647 current_duplex = DUPLEX_INVALID;
2648
2649 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2650 u32 val;
2651
2652 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2653 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2654 if (!(val & (1 << 10))) {
2655 val |= (1 << 10);
2656 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2657 goto relink;
2658 }
2659 }
2660
2661 bmsr = 0;
2662 for (i = 0; i < 100; i++) {
2663 tg3_readphy(tp, MII_BMSR, &bmsr);
2664 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2665 (bmsr & BMSR_LSTATUS))
2666 break;
2667 udelay(40);
2668 }
2669
2670 if (bmsr & BMSR_LSTATUS) {
2671 u32 aux_stat, bmcr;
2672
2673 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2674 for (i = 0; i < 2000; i++) {
2675 udelay(10);
2676 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2677 aux_stat)
2678 break;
2679 }
2680
2681 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2682 &current_speed,
2683 &current_duplex);
2684
2685 bmcr = 0;
2686 for (i = 0; i < 200; i++) {
2687 tg3_readphy(tp, MII_BMCR, &bmcr);
2688 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2689 continue;
2690 if (bmcr && bmcr != 0x7fff)
2691 break;
2692 udelay(10);
2693 }
2694
Matt Carlsonef167e22007-12-20 20:10:01 -08002695 lcl_adv = 0;
2696 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697
Matt Carlsonef167e22007-12-20 20:10:01 -08002698 tp->link_config.active_speed = current_speed;
2699 tp->link_config.active_duplex = current_duplex;
2700
2701 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2702 if ((bmcr & BMCR_ANENABLE) &&
2703 tg3_copper_is_advertising_all(tp,
2704 tp->link_config.advertising)) {
2705 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2706 &rmt_adv))
2707 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708 }
2709 } else {
2710 if (!(bmcr & BMCR_ANENABLE) &&
2711 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002712 tp->link_config.duplex == current_duplex &&
2713 tp->link_config.flowctrl ==
2714 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 }
2717 }
2718
Matt Carlsonef167e22007-12-20 20:10:01 -08002719 if (current_link_up == 1 &&
2720 tp->link_config.active_duplex == DUPLEX_FULL)
2721 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 }
2723
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724relink:
Michael Chan6921d202005-12-13 21:15:53 -08002725 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 u32 tmp;
2727
2728 tg3_phy_copper_begin(tp);
2729
2730 tg3_readphy(tp, MII_BMSR, &tmp);
2731 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2732 (tmp & BMSR_LSTATUS))
2733 current_link_up = 1;
2734 }
2735
2736 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2737 if (current_link_up == 1) {
2738 if (tp->link_config.active_speed == SPEED_100 ||
2739 tp->link_config.active_speed == SPEED_10)
2740 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2741 else
2742 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2743 } else
2744 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2745
2746 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2747 if (tp->link_config.active_duplex == DUPLEX_HALF)
2748 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2749
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002751 if (current_link_up == 1 &&
2752 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002754 else
2755 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 }
2757
2758 /* ??? Without this setting Netgear GA302T PHY does not
2759 * ??? send/receive packets...
2760 */
2761 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2762 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2763 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2764 tw32_f(MAC_MI_MODE, tp->mi_mode);
2765 udelay(80);
2766 }
2767
2768 tw32_f(MAC_MODE, tp->mac_mode);
2769 udelay(40);
2770
2771 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2772 /* Polled via timer. */
2773 tw32_f(MAC_EVENT, 0);
2774 } else {
2775 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2776 }
2777 udelay(40);
2778
2779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2780 current_link_up == 1 &&
2781 tp->link_config.active_speed == SPEED_1000 &&
2782 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2783 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2784 udelay(120);
2785 tw32_f(MAC_STATUS,
2786 (MAC_STATUS_SYNC_CHANGED |
2787 MAC_STATUS_CFG_CHANGED));
2788 udelay(40);
2789 tg3_write_mem(tp,
2790 NIC_SRAM_FIRMWARE_MBOX,
2791 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2792 }
2793
2794 if (current_link_up != netif_carrier_ok(tp->dev)) {
2795 if (current_link_up)
2796 netif_carrier_on(tp->dev);
2797 else
2798 netif_carrier_off(tp->dev);
2799 tg3_link_report(tp);
2800 }
2801
2802 return 0;
2803}
2804
2805struct tg3_fiber_aneginfo {
2806 int state;
2807#define ANEG_STATE_UNKNOWN 0
2808#define ANEG_STATE_AN_ENABLE 1
2809#define ANEG_STATE_RESTART_INIT 2
2810#define ANEG_STATE_RESTART 3
2811#define ANEG_STATE_DISABLE_LINK_OK 4
2812#define ANEG_STATE_ABILITY_DETECT_INIT 5
2813#define ANEG_STATE_ABILITY_DETECT 6
2814#define ANEG_STATE_ACK_DETECT_INIT 7
2815#define ANEG_STATE_ACK_DETECT 8
2816#define ANEG_STATE_COMPLETE_ACK_INIT 9
2817#define ANEG_STATE_COMPLETE_ACK 10
2818#define ANEG_STATE_IDLE_DETECT_INIT 11
2819#define ANEG_STATE_IDLE_DETECT 12
2820#define ANEG_STATE_LINK_OK 13
2821#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2822#define ANEG_STATE_NEXT_PAGE_WAIT 15
2823
2824 u32 flags;
2825#define MR_AN_ENABLE 0x00000001
2826#define MR_RESTART_AN 0x00000002
2827#define MR_AN_COMPLETE 0x00000004
2828#define MR_PAGE_RX 0x00000008
2829#define MR_NP_LOADED 0x00000010
2830#define MR_TOGGLE_TX 0x00000020
2831#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2832#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2833#define MR_LP_ADV_SYM_PAUSE 0x00000100
2834#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2835#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2836#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2837#define MR_LP_ADV_NEXT_PAGE 0x00001000
2838#define MR_TOGGLE_RX 0x00002000
2839#define MR_NP_RX 0x00004000
2840
2841#define MR_LINK_OK 0x80000000
2842
2843 unsigned long link_time, cur_time;
2844
2845 u32 ability_match_cfg;
2846 int ability_match_count;
2847
2848 char ability_match, idle_match, ack_match;
2849
2850 u32 txconfig, rxconfig;
2851#define ANEG_CFG_NP 0x00000080
2852#define ANEG_CFG_ACK 0x00000040
2853#define ANEG_CFG_RF2 0x00000020
2854#define ANEG_CFG_RF1 0x00000010
2855#define ANEG_CFG_PS2 0x00000001
2856#define ANEG_CFG_PS1 0x00008000
2857#define ANEG_CFG_HD 0x00004000
2858#define ANEG_CFG_FD 0x00002000
2859#define ANEG_CFG_INVAL 0x00001f06
2860
2861};
2862#define ANEG_OK 0
2863#define ANEG_DONE 1
2864#define ANEG_TIMER_ENAB 2
2865#define ANEG_FAILED -1
2866
2867#define ANEG_STATE_SETTLE_TIME 10000
2868
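/* One step of the software autonegotiation state machine used on fiber
 * links.  Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED;
 * fiber_autoneg() drives it from a polling loop.
 */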
2869static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2870 struct tg3_fiber_aneginfo *ap)
2871{
Matt Carlson5be73b42007-12-20 20:09:29 -08002872 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 unsigned long delta;
2874 u32 rx_cfg_reg;
2875 int ret;
2876
2877 if (ap->state == ANEG_STATE_UNKNOWN) {
2878 ap->rxconfig = 0;
2879 ap->link_time = 0;
2880 ap->cur_time = 0;
2881 ap->ability_match_cfg = 0;
2882 ap->ability_match_count = 0;
2883 ap->ability_match = 0;
2884 ap->idle_match = 0;
2885 ap->ack_match = 0;
2886 }
2887 ap->cur_time++;
2888
2889 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2890 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2891
2892 if (rx_cfg_reg != ap->ability_match_cfg) {
2893 ap->ability_match_cfg = rx_cfg_reg;
2894 ap->ability_match = 0;
2895 ap->ability_match_count = 0;
2896 } else {
2897 if (++ap->ability_match_count > 1) {
2898 ap->ability_match = 1;
2899 ap->ability_match_cfg = rx_cfg_reg;
2900 }
2901 }
2902 if (rx_cfg_reg & ANEG_CFG_ACK)
2903 ap->ack_match = 1;
2904 else
2905 ap->ack_match = 0;
2906
2907 ap->idle_match = 0;
2908 } else {
2909 ap->idle_match = 1;
2910 ap->ability_match_cfg = 0;
2911 ap->ability_match_count = 0;
2912 ap->ability_match = 0;
2913 ap->ack_match = 0;
2914
2915 rx_cfg_reg = 0;
2916 }
2917
2918 ap->rxconfig = rx_cfg_reg;
2919 ret = ANEG_OK;
2920
2921 switch(ap->state) {
2922 case ANEG_STATE_UNKNOWN:
2923 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2924 ap->state = ANEG_STATE_AN_ENABLE;
2925
2926 /* fallthru */
2927 case ANEG_STATE_AN_ENABLE:
2928 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2929 if (ap->flags & MR_AN_ENABLE) {
2930 ap->link_time = 0;
2931 ap->cur_time = 0;
2932 ap->ability_match_cfg = 0;
2933 ap->ability_match_count = 0;
2934 ap->ability_match = 0;
2935 ap->idle_match = 0;
2936 ap->ack_match = 0;
2937
2938 ap->state = ANEG_STATE_RESTART_INIT;
2939 } else {
2940 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2941 }
2942 break;
2943
2944 case ANEG_STATE_RESTART_INIT:
2945 ap->link_time = ap->cur_time;
2946 ap->flags &= ~(MR_NP_LOADED);
2947 ap->txconfig = 0;
2948 tw32(MAC_TX_AUTO_NEG, 0);
2949 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2950 tw32_f(MAC_MODE, tp->mac_mode);
2951 udelay(40);
2952
2953 ret = ANEG_TIMER_ENAB;
2954 ap->state = ANEG_STATE_RESTART;
2955
2956 /* fallthru */
2957 case ANEG_STATE_RESTART:
2958 delta = ap->cur_time - ap->link_time;
2959 if (delta > ANEG_STATE_SETTLE_TIME) {
2960 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2961 } else {
2962 ret = ANEG_TIMER_ENAB;
2963 }
2964 break;
2965
2966 case ANEG_STATE_DISABLE_LINK_OK:
2967 ret = ANEG_DONE;
2968 break;
2969
2970 case ANEG_STATE_ABILITY_DETECT_INIT:
2971 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08002972 ap->txconfig = ANEG_CFG_FD;
2973 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2974 if (flowctrl & ADVERTISE_1000XPAUSE)
2975 ap->txconfig |= ANEG_CFG_PS1;
2976 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2977 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2979 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2980 tw32_f(MAC_MODE, tp->mac_mode);
2981 udelay(40);
2982
2983 ap->state = ANEG_STATE_ABILITY_DETECT;
2984 break;
2985
2986 case ANEG_STATE_ABILITY_DETECT:
2987 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2988 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2989 }
2990 break;
2991
2992 case ANEG_STATE_ACK_DETECT_INIT:
2993 ap->txconfig |= ANEG_CFG_ACK;
2994 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2995 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2996 tw32_f(MAC_MODE, tp->mac_mode);
2997 udelay(40);
2998
2999 ap->state = ANEG_STATE_ACK_DETECT;
3000
3001 /* fallthru */
3002 case ANEG_STATE_ACK_DETECT:
3003 if (ap->ack_match != 0) {
3004 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3005 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3006 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3007 } else {
3008 ap->state = ANEG_STATE_AN_ENABLE;
3009 }
3010 } else if (ap->ability_match != 0 &&
3011 ap->rxconfig == 0) {
3012 ap->state = ANEG_STATE_AN_ENABLE;
3013 }
3014 break;
3015
3016 case ANEG_STATE_COMPLETE_ACK_INIT:
3017 if (ap->rxconfig & ANEG_CFG_INVAL) {
3018 ret = ANEG_FAILED;
3019 break;
3020 }
3021 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3022 MR_LP_ADV_HALF_DUPLEX |
3023 MR_LP_ADV_SYM_PAUSE |
3024 MR_LP_ADV_ASYM_PAUSE |
3025 MR_LP_ADV_REMOTE_FAULT1 |
3026 MR_LP_ADV_REMOTE_FAULT2 |
3027 MR_LP_ADV_NEXT_PAGE |
3028 MR_TOGGLE_RX |
3029 MR_NP_RX);
3030 if (ap->rxconfig & ANEG_CFG_FD)
3031 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3032 if (ap->rxconfig & ANEG_CFG_HD)
3033 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3034 if (ap->rxconfig & ANEG_CFG_PS1)
3035 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3036 if (ap->rxconfig & ANEG_CFG_PS2)
3037 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3038 if (ap->rxconfig & ANEG_CFG_RF1)
3039 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3040 if (ap->rxconfig & ANEG_CFG_RF2)
3041 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3042 if (ap->rxconfig & ANEG_CFG_NP)
3043 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3044
3045 ap->link_time = ap->cur_time;
3046
3047 ap->flags ^= (MR_TOGGLE_TX);
3048 if (ap->rxconfig & 0x0008)
3049 ap->flags |= MR_TOGGLE_RX;
3050 if (ap->rxconfig & ANEG_CFG_NP)
3051 ap->flags |= MR_NP_RX;
3052 ap->flags |= MR_PAGE_RX;
3053
3054 ap->state = ANEG_STATE_COMPLETE_ACK;
3055 ret = ANEG_TIMER_ENAB;
3056 break;
3057
3058 case ANEG_STATE_COMPLETE_ACK:
3059 if (ap->ability_match != 0 &&
3060 ap->rxconfig == 0) {
3061 ap->state = ANEG_STATE_AN_ENABLE;
3062 break;
3063 }
3064 delta = ap->cur_time - ap->link_time;
3065 if (delta > ANEG_STATE_SETTLE_TIME) {
3066 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3067 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3068 } else {
3069 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3070 !(ap->flags & MR_NP_RX)) {
3071 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3072 } else {
3073 ret = ANEG_FAILED;
3074 }
3075 }
3076 }
3077 break;
3078
3079 case ANEG_STATE_IDLE_DETECT_INIT:
3080 ap->link_time = ap->cur_time;
3081 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3082 tw32_f(MAC_MODE, tp->mac_mode);
3083 udelay(40);
3084
3085 ap->state = ANEG_STATE_IDLE_DETECT;
3086 ret = ANEG_TIMER_ENAB;
3087 break;
3088
3089 case ANEG_STATE_IDLE_DETECT:
3090 if (ap->ability_match != 0 &&
3091 ap->rxconfig == 0) {
3092 ap->state = ANEG_STATE_AN_ENABLE;
3093 break;
3094 }
3095 delta = ap->cur_time - ap->link_time;
3096 if (delta > ANEG_STATE_SETTLE_TIME) {
3097 /* XXX another gem from the Broadcom driver :( */
3098 ap->state = ANEG_STATE_LINK_OK;
3099 }
3100 break;
3101
3102 case ANEG_STATE_LINK_OK:
3103 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3104 ret = ANEG_DONE;
3105 break;
3106
3107 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3108 /* ??? unimplemented */
3109 break;
3110
3111 case ANEG_STATE_NEXT_PAGE_WAIT:
3112 /* ??? unimplemented */
3113 break;
3114
3115 default:
3116 ret = ANEG_FAILED;
3117 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07003118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119
3120 return ret;
3121}
3122
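/* Run the software fiber autonegotiation state machine for up to roughly
 * 195 ms and return 1 on successful completion, passing back the
 * transmitted config word and the resulting MR_* flags through *txflags
 * and *rxflags.
 */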
Matt Carlson5be73b42007-12-20 20:09:29 -08003123static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124{
3125 int res = 0;
3126 struct tg3_fiber_aneginfo aninfo;
3127 int status = ANEG_FAILED;
3128 unsigned int tick;
3129 u32 tmp;
3130
3131 tw32_f(MAC_TX_AUTO_NEG, 0);
3132
3133 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3134 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3135 udelay(40);
3136
3137 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3138 udelay(40);
3139
3140 memset(&aninfo, 0, sizeof(aninfo));
3141 aninfo.flags |= MR_AN_ENABLE;
3142 aninfo.state = ANEG_STATE_UNKNOWN;
3143 aninfo.cur_time = 0;
3144 tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

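/* Fibre link setup using the hardware SG_DIG autoneg block.
 * Returns 1 when the link is up.
 */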
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}

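/* Fibre link setup using the software autoneg state machine, or a
 * forced 1000-FD link when autoneg is disabled. Returns 1 when the
 * link is up.
 */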
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}

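/* Top-level link setup for TBI (fibre) ports: selects hardware or
 * software autoneg and reports carrier and flow control changes.
 */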
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}

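/* Link setup for SerDes devices that are managed through MII-style
 * PHY registers (5714-class parts derive link status from MAC_TX_STATUS).
 */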
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
					     BMCR_ANRESTART |
					     BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}

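/* Called periodically for MII-SerDes ports: force the link up via
 * parallel detection when the partner stops sending config code words,
 * and re-enable autoneg once config code words are seen again.
 */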
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}

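/* Top-level PHY setup dispatcher: picks the fibre, MII-SerDes or copper
 * path, then updates MAC TX timings, statistics coalescing and the ASPM
 * workaround threshold to match the new link state.
 */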
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}

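/* Number of free TX descriptors: the configured ring depth minus the
 * descriptors still outstanding between producer and consumer.
 */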
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}

/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}

/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}

#if TG3_VLAN_TAG_USED
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}

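/* One NAPI pass: handle link-change events, reap TX completions, then
 * receive packets up to the remaining budget. Returns the updated
 * work_done count.
 */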
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}

static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif

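/* reset_task work handler: quiesce the driver, reset and re-initialize
 * the chip, then restart I/O. Scheduled from the TX timeout and error
 * recovery paths.
 */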
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}

static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

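	/* base > 0xffffdcc0 means the buffer starts within roughly 9KB of
	 * a 4GB boundary; the 32-bit wrap of base + len + 8 then confirms
	 * an actual crossing.
	 */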
4587 return ((base > 0xffffdcc0) &&
4588 (base + len + 8 < base));
4589}
4590
Michael Chan72f2afb2006-03-06 19:28:35 -08004591/* Test for DMA addresses > 40-bit */
4592static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4593 int len)
4594{
4595#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004596 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004597 return (((u64) mapping + len) > DMA_40BIT_MASK);
4598 return 0;
4599#else
4600 return 0;
4601#endif
4602}
4603
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4605
Michael Chan72f2afb2006-03-06 19:28:35 -08004606/* Workaround 4GB and 40-bit hardware DMA bugs. */
4607static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004608 u32 last_plus_one, u32 *start,
4609 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610{
Matt Carlson41588ba2008-04-19 18:12:33 -07004611 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004612 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004614 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
Matt Carlson41588ba2008-04-19 18:12:33 -07004616 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4617 new_skb = skb_copy(skb, GFP_ATOMIC);
4618 else {
4619 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4620
4621 new_skb = skb_copy_expand(skb,
4622 skb_headroom(skb) + more_headroom,
4623 skb_tailroom(skb), GFP_ATOMIC);
4624 }
4625
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004627 ret = -1;
4628 } else {
4629 /* New SKB is guaranteed to be linear. */
4630 entry = *start;
David S. Miller90079ce2008-09-11 04:52:51 -07004631 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4632 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4633
Michael Chanc58ec932005-09-17 00:46:27 -07004634 /* Make sure new skb does not cross any 4G boundaries.
4635 * Drop the packet if it does.
4636 */
David S. Miller90079ce2008-09-11 04:52:51 -07004637 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
David S. Miller638266f2008-09-11 15:45:19 -07004638 if (!ret)
4639 skb_dma_unmap(&tp->pdev->dev, new_skb,
4640 DMA_TO_DEVICE);
Michael Chanc58ec932005-09-17 00:46:27 -07004641 ret = -1;
4642 dev_kfree_skb(new_skb);
4643 new_skb = NULL;
4644 } else {
4645 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4646 base_flags, 1 | (mss << 1));
4647 *start = NEXT_TX(entry);
4648 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649 }
4650
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 /* Now clean up the sw ring entries. */
4652 i = 0;
4653 while (entry != last_plus_one) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654 if (i == 0) {
4655 tp->tx_buffers[entry].skb = new_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 } else {
4657 tp->tx_buffers[entry].skb = NULL;
4658 }
4659 entry = NEXT_TX(entry);
4660 i++;
4661 }
4662
David S. Miller90079ce2008-09-11 04:52:51 -07004663 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 dev_kfree_skb(skb);
4665
Michael Chanc58ec932005-09-17 00:46:27 -07004666 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667}
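/* In short, the workaround above linearizes the offending skb into a
 * freshly allocated copy (adding a little headroom on 5701 chips so the
 * copied data starts 4-byte aligned), maps the copy, checks that the new
 * mapping no longer crosses a 4GB boundary, rewrites the descriptor to
 * point at it, and finally unmaps and frees the original skb while the
 * stale sw ring entries are cleared.
 */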
4668
4669static void tg3_set_txd(struct tg3 *tp, int entry,
4670 dma_addr_t mapping, int len, u32 flags,
4671 u32 mss_and_is_end)
4672{
4673 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4674 int is_end = (mss_and_is_end & 0x1);
4675 u32 mss = (mss_and_is_end >> 1);
4676 u32 vlan_tag = 0;
4677
4678 if (is_end)
4679 flags |= TXD_FLAG_END;
4680 if (flags & TXD_FLAG_VLAN) {
4681 vlan_tag = flags >> 16;
4682 flags &= 0xffff;
4683 }
4684 vlan_tag |= (mss << TXD_MSS_SHIFT);
4685
4686 txd->addr_hi = ((u64) mapping >> 32);
4687 txd->addr_lo = ((u64) mapping & 0xffffffff);
4688 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4689 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4690}
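/* Illustrative sketch, not from the driver: how callers build the packed
 * "mss_and_is_end" argument consumed above.  Bit 0 carries the
 * end-of-packet marker and the MSS sits in the remaining bits, so one
 * u32 conveys both "last fragment" and "segment at this MSS".  The
 * helper name below is made up for illustration.
 */
#if 0	/* illustrative only */
static u32 example_mss_and_is_end(u32 mss, int is_last_frag)
{
	return (is_last_frag ? 1 : 0) | (mss << 1);
}
/* ...which tg3_set_txd() unpacks as:
 *	is_end = val & 0x1;
 *	mss    = val >> 1;
 */
#endif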
4691
Michael Chan5a6f3072006-03-20 22:28:05 -08004692/* hard_start_xmit for devices that don't have any bugs and
4693 * support TG3_FLG2_HW_TSO_2 only.
4694 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4696{
4697 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004699 struct skb_shared_info *sp;
4700 dma_addr_t mapping;
Michael Chan5a6f3072006-03-20 22:28:05 -08004701
4702 len = skb_headlen(skb);
4703
Michael Chan00b70502006-06-17 21:58:45 -07004704 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004705 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004706 * interrupt. Furthermore, IRQ processing runs lockless so we have
4707 * no IRQ context deadlocks to worry about either. Rejoice!
4708 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004709 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004710 if (!netif_queue_stopped(dev)) {
4711 netif_stop_queue(dev);
4712
4713 /* This is a hard error, log it. */
4714 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4715 "queue awake!\n", dev->name);
4716 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004717 return NETDEV_TX_BUSY;
4718 }
4719
4720 entry = tp->tx_prod;
4721 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004722 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004723 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004724 int tcp_opt_len, ip_tcp_len;
4725
4726 if (skb_header_cloned(skb) &&
4727 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4728 dev_kfree_skb(skb);
4729 goto out_unlock;
4730 }
4731
Michael Chanb0026622006-07-03 19:42:14 -07004732 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4733 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4734 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004735 struct iphdr *iph = ip_hdr(skb);
4736
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004737 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004738 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004739
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004740 iph->check = 0;
4741 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004742 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4743 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004744
4745 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4746 TXD_FLAG_CPU_POST_DMA);
4747
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004748 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004749
Michael Chan5a6f3072006-03-20 22:28:05 -08004750 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004751 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004752 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004753#if TG3_VLAN_TAG_USED
4754 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4755 base_flags |= (TXD_FLAG_VLAN |
4756 (vlan_tx_tag_get(skb) << 16));
4757#endif
4758
David S. Miller90079ce2008-09-11 04:52:51 -07004759 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4760 dev_kfree_skb(skb);
4761 goto out_unlock;
4762 }
4763
4764 sp = skb_shinfo(skb);
4765
4766 mapping = sp->dma_maps[0];
Michael Chan5a6f3072006-03-20 22:28:05 -08004767
4768 tp->tx_buffers[entry].skb = skb;
Michael Chan5a6f3072006-03-20 22:28:05 -08004769
4770 tg3_set_txd(tp, entry, mapping, len, base_flags,
4771 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4772
4773 entry = NEXT_TX(entry);
4774
4775 /* Now loop through additional data fragments, and queue them. */
4776 if (skb_shinfo(skb)->nr_frags > 0) {
4777 unsigned int i, last;
4778
4779 last = skb_shinfo(skb)->nr_frags - 1;
4780 for (i = 0; i <= last; i++) {
4781 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4782
4783 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004784 mapping = sp->dma_maps[i + 1];
Michael Chan5a6f3072006-03-20 22:28:05 -08004785 tp->tx_buffers[entry].skb = NULL;
Michael Chan5a6f3072006-03-20 22:28:05 -08004786
4787 tg3_set_txd(tp, entry, mapping, len,
4788 base_flags, (i == last) | (mss << 1));
4789
4790 entry = NEXT_TX(entry);
4791 }
4792 }
4793
4794 /* Packets are ready, update Tx producer idx local and on card. */
4795 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4796
4797 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004798 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004799 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004800 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004801 netif_wake_queue(tp->dev);
4802 }
4803
4804out_unlock:
4805 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004806
4807 dev->trans_start = jiffies;
4808
4809 return NETDEV_TX_OK;
4810}
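/* Illustrative sketch, not from the driver: the flow control at the end
 * of tg3_start_xmit() stops the queue whenever fewer than
 * MAX_SKB_FRAGS + 1 descriptors remain, so the next packet never has to
 * be checked fragment by fragment, and it re-wakes the queue once
 * tg3_tx_avail() climbs back above TG3_TX_WAKEUP_THRESH(tp).  Assuming
 * the usual producer/consumer arithmetic for a power-of-two ring (the
 * real tg3_tx_avail() lives elsewhere in this file), the free-space
 * computation is roughly:
 */
#if 0	/* illustrative only */
static u32 example_tx_avail(u32 prod, u32 cons, u32 ring_size)
{
	/* One slot stays unused so prod == cons always means "empty"
	 * rather than "full".
	 */
	return (ring_size - 1) - ((prod - cons) & (ring_size - 1));
}
#endif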
4811
Michael Chan52c0fd82006-06-29 20:15:54 -07004812static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4813
4814/* Use GSO to workaround a rare TSO bug that may be triggered when the
4815 * TSO header is greater than 80 bytes.
4816 */
4817static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4818{
4819 struct sk_buff *segs, *nskb;
4820
4821 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004822 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004823 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004824 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4825 return NETDEV_TX_BUSY;
4826
4827 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004828 }
4829
4830 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004831 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004832 goto tg3_tso_bug_end;
4833
4834 do {
4835 nskb = segs;
4836 segs = segs->next;
4837 nskb->next = NULL;
4838 tg3_start_xmit_dma_bug(nskb, tp->dev);
4839 } while (segs);
4840
4841tg3_tso_bug_end:
4842 dev_kfree_skb(skb);
4843
4844 return NETDEV_TX_OK;
4845}
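/* A rough worked example of the reservation made above, assuming typical
 * numbers: a 64KB TSO send at an MSS of 1448 bytes is split by
 * skb_gso_segment() into about 45 separate frames, and with the
 * conservative estimate of three descriptors per frame the queue is
 * stopped unless roughly 135 descriptors are free.  Only if space is
 * still short after stopping the queue does the function return
 * NETDEV_TX_BUSY and let the stack retry later.
 */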
Michael Chan52c0fd82006-06-29 20:15:54 -07004846
Michael Chan5a6f3072006-03-20 22:28:05 -08004847/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4848 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4849 */
4850static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4851{
4852 struct tg3 *tp = netdev_priv(dev);
Michael Chan5a6f3072006-03-20 22:28:05 -08004853 u32 len, entry, base_flags, mss;
David S. Miller90079ce2008-09-11 04:52:51 -07004854 struct skb_shared_info *sp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004855 int would_hit_hwbug;
David S. Miller90079ce2008-09-11 04:52:51 -07004856 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857
4858 len = skb_headlen(skb);
4859
Michael Chan00b70502006-06-17 21:58:45 -07004860 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004861 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004862 * interrupt. Furthermore, IRQ processing runs lockless so we have
4863 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004865 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004866 if (!netif_queue_stopped(dev)) {
4867 netif_stop_queue(dev);
4868
4869 /* This is a hard error, log it. */
4870 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4871 "queue awake!\n", dev->name);
4872 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873 return NETDEV_TX_BUSY;
4874 }
4875
4876 entry = tp->tx_prod;
4877 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004878 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004881 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004882 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07004883 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884
4885 if (skb_header_cloned(skb) &&
4886 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4887 dev_kfree_skb(skb);
4888 goto out_unlock;
4889 }
4890
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004891 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004892 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893
Michael Chan52c0fd82006-06-29 20:15:54 -07004894 hdr_len = ip_tcp_len + tcp_opt_len;
4895 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08004896 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07004897 return (tg3_tso_bug(tp, skb));
4898
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4900 TXD_FLAG_CPU_POST_DMA);
4901
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004902 iph = ip_hdr(skb);
4903 iph->check = 0;
4904 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004906 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004908 } else
4909 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4910 iph->daddr, 0,
4911 IPPROTO_TCP,
4912 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913
4914 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4915 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004916 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004917 int tsflags;
4918
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004919 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004920 mss |= (tsflags << 11);
4921 }
4922 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004923 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 int tsflags;
4925
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004926 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 base_flags |= tsflags << 12;
4928 }
4929 }
4930 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931#if TG3_VLAN_TAG_USED
4932 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4933 base_flags |= (TXD_FLAG_VLAN |
4934 (vlan_tx_tag_get(skb) << 16));
4935#endif
4936
David S. Miller90079ce2008-09-11 04:52:51 -07004937 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4938 dev_kfree_skb(skb);
4939 goto out_unlock;
4940 }
4941
4942 sp = skb_shinfo(skb);
4943
4944 mapping = sp->dma_maps[0];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945
4946 tp->tx_buffers[entry].skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947
4948 would_hit_hwbug = 0;
4949
Matt Carlson41588ba2008-04-19 18:12:33 -07004950 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4951 would_hit_hwbug = 1;
4952 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07004953 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954
4955 tg3_set_txd(tp, entry, mapping, len, base_flags,
4956 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4957
4958 entry = NEXT_TX(entry);
4959
4960 /* Now loop through additional data fragments, and queue them. */
4961 if (skb_shinfo(skb)->nr_frags > 0) {
4962 unsigned int i, last;
4963
4964 last = skb_shinfo(skb)->nr_frags - 1;
4965 for (i = 0; i <= last; i++) {
4966 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4967
4968 len = frag->size;
David S. Miller90079ce2008-09-11 04:52:51 -07004969 mapping = sp->dma_maps[i + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004970
4971 tp->tx_buffers[entry].skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972
Michael Chanc58ec932005-09-17 00:46:27 -07004973 if (tg3_4g_overflow_test(mapping, len))
4974 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975
Michael Chan72f2afb2006-03-06 19:28:35 -08004976 if (tg3_40bit_overflow_test(tp, mapping, len))
4977 would_hit_hwbug = 1;
4978
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4980 tg3_set_txd(tp, entry, mapping, len,
4981 base_flags, (i == last)|(mss << 1));
4982 else
4983 tg3_set_txd(tp, entry, mapping, len,
4984 base_flags, (i == last));
4985
4986 entry = NEXT_TX(entry);
4987 }
4988 }
4989
4990 if (would_hit_hwbug) {
4991 u32 last_plus_one = entry;
4992 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993
Michael Chanc58ec932005-09-17 00:46:27 -07004994 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4995 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004996
4997 /* If the workaround fails due to memory/mapping
4998 * failure, silently drop this packet.
4999 */
Michael Chan72f2afb2006-03-06 19:28:35 -08005000 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07005001 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002 goto out_unlock;
5003
5004 entry = start;
5005 }
5006
5007 /* Packets are ready, update Tx producer idx local and on card. */
5008 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5009
5010 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07005011 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07005013 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07005014 netif_wake_queue(tp->dev);
5015 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005016
5017out_unlock:
5018 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019
5020 dev->trans_start = jiffies;
5021
5022 return NETDEV_TX_OK;
5023}
5024
5025static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5026 int new_mtu)
5027{
5028 dev->mtu = new_mtu;
5029
Michael Chanef7f5ec2005-07-25 12:32:25 -07005030 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07005031 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07005032 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5033 ethtool_op_set_tso(dev, 0);
5034 }
5035 else
5036 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5037 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07005038 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07005039 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07005040 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07005041 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042}
5043
5044static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5045{
5046 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07005047 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048
5049 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5050 return -EINVAL;
5051
5052 if (!netif_running(dev)) {
5053 /* We'll just catch it later when the
5054 * device is up'd.
5055 */
5056 tg3_set_mtu(dev, tp, new_mtu);
5057 return 0;
5058 }
5059
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005060 tg3_phy_stop(tp);
5061
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07005063
5064 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065
Michael Chan944d9802005-05-29 14:57:48 -07005066 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067
5068 tg3_set_mtu(dev, tp, new_mtu);
5069
Michael Chanb9ec6c12006-07-25 16:37:27 -07005070 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071
Michael Chanb9ec6c12006-07-25 16:37:27 -07005072 if (!err)
5073 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074
David S. Millerf47c11e2005-06-24 20:18:35 -07005075 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07005077 if (!err)
5078 tg3_phy_start(tp);
5079
Michael Chanb9ec6c12006-07-25 16:37:27 -07005080 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081}
5082
5083/* Free up pending packets in all rx/tx rings.
5084 *
5085 * The chip has been shut down and the driver detached from
5086 * the networking, so no interrupts or new tx packets will
5087 * end up in the driver. tp->{tx,}lock is not held and we are not
5088 * in an interrupt context and thus may sleep.
5089 */
5090static void tg3_free_rings(struct tg3 *tp)
5091{
5092 struct ring_info *rxp;
5093 int i;
5094
5095 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5096 rxp = &tp->rx_std_buffers[i];
5097
5098 if (rxp->skb == NULL)
5099 continue;
5100 pci_unmap_single(tp->pdev,
5101 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07005102 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103 PCI_DMA_FROMDEVICE);
5104 dev_kfree_skb_any(rxp->skb);
5105 rxp->skb = NULL;
5106 }
5107
5108 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5109 rxp = &tp->rx_jumbo_buffers[i];
5110
5111 if (rxp->skb == NULL)
5112 continue;
5113 pci_unmap_single(tp->pdev,
5114 pci_unmap_addr(rxp, mapping),
5115 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5116 PCI_DMA_FROMDEVICE);
5117 dev_kfree_skb_any(rxp->skb);
5118 rxp->skb = NULL;
5119 }
5120
5121 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5122 struct tx_ring_info *txp;
5123 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124
5125 txp = &tp->tx_buffers[i];
5126 skb = txp->skb;
5127
5128 if (skb == NULL) {
5129 i++;
5130 continue;
5131 }
5132
David S. Miller90079ce2008-09-11 04:52:51 -07005133 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5134
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135 txp->skb = NULL;
5136
David S. Miller90079ce2008-09-11 04:52:51 -07005137 i += skb_shinfo(skb)->nr_frags + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138
5139 dev_kfree_skb_any(skb);
5140 }
5141}
5142
5143/* Initialize tx/rx rings for packet processing.
5144 *
5145 * The chip has been shut down and the driver detached from
5146 * the networking, so no interrupts or new tx packets will
5147 * end up in the driver. tp->{tx,}lock are held and thus
5148 * we may not sleep.
5149 */
Michael Chan32d8c572006-07-25 16:38:29 -07005150static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151{
5152 u32 i;
5153
5154 /* Free up all the SKBs. */
5155 tg3_free_rings(tp);
5156
5157 /* Zero out all descriptors. */
5158 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5159 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5160 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5161 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5162
Michael Chan7e72aad2005-07-25 12:31:17 -07005163 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07005164 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07005165 (tp->dev->mtu > ETH_DATA_LEN))
5166 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5167
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 /* Initialize invariants of the rings, we only set this
5169 * stuff once. This works because the card does not
5170 * write into the rx buffer posting rings.
5171 */
5172 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5173 struct tg3_rx_buffer_desc *rxd;
5174
5175 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07005176 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177 << RXD_LEN_SHIFT;
5178 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5179 rxd->opaque = (RXD_OPAQUE_RING_STD |
5180 (i << RXD_OPAQUE_INDEX_SHIFT));
5181 }
5182
Michael Chan0f893dc2005-07-25 12:30:38 -07005183 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5185 struct tg3_rx_buffer_desc *rxd;
5186
5187 rxd = &tp->rx_jumbo[i];
5188 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5189 << RXD_LEN_SHIFT;
5190 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5191 RXD_FLAG_JUMBO;
5192 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5193 (i << RXD_OPAQUE_INDEX_SHIFT));
5194 }
5195 }
5196
5197 /* Now allocate fresh SKBs for each rx ring. */
5198 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07005199 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5200 printk(KERN_WARNING PFX
5201 "%s: Using a smaller RX standard ring, "
5202 "only %d out of %d buffers were allocated "
5203 "successfully.\n",
5204 tp->dev->name, i, tp->rx_pending);
5205 if (i == 0)
5206 return -ENOMEM;
5207 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210 }
5211
Michael Chan0f893dc2005-07-25 12:30:38 -07005212 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5214 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07005215 -1, i) < 0) {
5216 printk(KERN_WARNING PFX
5217 "%s: Using a smaller RX jumbo ring, "
5218 "only %d out of %d buffers were "
5219 "allocated successfully.\n",
5220 tp->dev->name, i, tp->rx_jumbo_pending);
5221 if (i == 0) {
5222 tg3_free_rings(tp);
5223 return -ENOMEM;
5224 }
5225 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 break;
Michael Chan32d8c572006-07-25 16:38:29 -07005227 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228 }
5229 }
Michael Chan32d8c572006-07-25 16:38:29 -07005230 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231}
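/* A note on the "opaque" cookie initialized above (descriptive, not new
 * driver logic): it packs a ring identifier together with the descriptor
 * index, e.g.
 *
 *	rxd->opaque = RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT);
 *
 * The NIC echoes this value back in the receive return ring, so the
 * completion path can map a finished buffer straight to its ring_info
 * slot without searching.
 */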
5232
5233/*
5234 * Must not be invoked with interrupt sources disabled and
5235 * the hardware shut down.
5236 */
5237static void tg3_free_consistent(struct tg3 *tp)
5238{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04005239 kfree(tp->rx_std_buffers);
5240 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241 if (tp->rx_std) {
5242 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5243 tp->rx_std, tp->rx_std_mapping);
5244 tp->rx_std = NULL;
5245 }
5246 if (tp->rx_jumbo) {
5247 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5248 tp->rx_jumbo, tp->rx_jumbo_mapping);
5249 tp->rx_jumbo = NULL;
5250 }
5251 if (tp->rx_rcb) {
5252 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5253 tp->rx_rcb, tp->rx_rcb_mapping);
5254 tp->rx_rcb = NULL;
5255 }
5256 if (tp->tx_ring) {
5257 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5258 tp->tx_ring, tp->tx_desc_mapping);
5259 tp->tx_ring = NULL;
5260 }
5261 if (tp->hw_status) {
5262 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5263 tp->hw_status, tp->status_mapping);
5264 tp->hw_status = NULL;
5265 }
5266 if (tp->hw_stats) {
5267 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5268 tp->hw_stats, tp->stats_mapping);
5269 tp->hw_stats = NULL;
5270 }
5271}
5272
5273/*
5274 * Must not be invoked with interrupt sources disabled and
5275 * the hardware shut down. Can sleep.
5276 */
5277static int tg3_alloc_consistent(struct tg3 *tp)
5278{
Yan Burmanbd2b3342006-12-14 15:25:00 -08005279 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280 (TG3_RX_RING_SIZE +
5281 TG3_RX_JUMBO_RING_SIZE)) +
5282 (sizeof(struct tx_ring_info) *
5283 TG3_TX_RING_SIZE),
5284 GFP_KERNEL);
5285 if (!tp->rx_std_buffers)
5286 return -ENOMEM;
5287
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5289 tp->tx_buffers = (struct tx_ring_info *)
5290 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5291
5292 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5293 &tp->rx_std_mapping);
5294 if (!tp->rx_std)
5295 goto err_out;
5296
5297 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5298 &tp->rx_jumbo_mapping);
5299
5300 if (!tp->rx_jumbo)
5301 goto err_out;
5302
5303 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5304 &tp->rx_rcb_mapping);
5305 if (!tp->rx_rcb)
5306 goto err_out;
5307
5308 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5309 &tp->tx_desc_mapping);
5310 if (!tp->tx_ring)
5311 goto err_out;
5312
5313 tp->hw_status = pci_alloc_consistent(tp->pdev,
5314 TG3_HW_STATUS_SIZE,
5315 &tp->status_mapping);
5316 if (!tp->hw_status)
5317 goto err_out;
5318
5319 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5320 sizeof(struct tg3_hw_stats),
5321 &tp->stats_mapping);
5322 if (!tp->hw_stats)
5323 goto err_out;
5324
5325 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5326 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5327
5328 return 0;
5329
5330err_out:
5331 tg3_free_consistent(tp);
5332 return -ENOMEM;
5333}
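/* Layout of the single kzalloc() made above (descriptive only): one
 * allocation backs all three software bookkeeping arrays, carved up by
 * pointer arithmetic, which is why tg3_free_consistent() only has to
 * kfree(tp->rx_std_buffers):
 *
 *	rx_std_buffers   : TG3_RX_RING_SIZE       x struct ring_info
 *	rx_jumbo_buffers : TG3_RX_JUMBO_RING_SIZE x struct ring_info
 *	tx_buffers       : TG3_TX_RING_SIZE       x struct tx_ring_info
 *
 * The jumbo array starts where the standard array ends, and the tx array
 * starts where the jumbo array ends.
 */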
5334
5335#define MAX_WAIT_CNT 1000
5336
5337/* To stop a block, clear the enable bit and poll till it
5338 * clears. tp->lock is held.
5339 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005340static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341{
5342 unsigned int i;
5343 u32 val;
5344
5345 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5346 switch (ofs) {
5347 case RCVLSC_MODE:
5348 case DMAC_MODE:
5349 case MBFREE_MODE:
5350 case BUFMGR_MODE:
5351 case MEMARB_MODE:
5352 /* We can't enable/disable these bits of the
5353 * 5705/5750, just say success.
5354 */
5355 return 0;
5356
5357 default:
5358 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005360 }
5361
5362 val = tr32(ofs);
5363 val &= ~enable_bit;
5364 tw32_f(ofs, val);
5365
5366 for (i = 0; i < MAX_WAIT_CNT; i++) {
5367 udelay(100);
5368 val = tr32(ofs);
5369 if ((val & enable_bit) == 0)
5370 break;
5371 }
5372
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005373 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5375 "ofs=%lx enable_bit=%x\n",
5376 ofs, enable_bit);
5377 return -ENODEV;
5378 }
5379
5380 return 0;
5381}
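/* Illustrative sketch, not from the driver: tg3_stop_block() is the usual
 * "clear the enable bit, then poll until the hardware acknowledges"
 * pattern.  Stripped of the tg3-specific bookkeeping it looks like this
 * (the helper name is made up for illustration):
 */
#if 0	/* illustrative only */
static int example_stop_block(void __iomem *reg, u32 enable_bit)
{
	u32 val = readl(reg) & ~enable_bit;
	int i;

	writel(val, reg);			/* request the stop */
	for (i = 0; i < MAX_WAIT_CNT; i++) {	/* then wait for the ack */
		udelay(100);
		if (!(readl(reg) & enable_bit))
			return 0;
	}
	return -ENODEV;				/* block never went idle */
}
#endif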
5382
5383/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005384static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385{
5386 int i, err;
5387
5388 tg3_disable_ints(tp);
5389
5390 tp->rx_mode &= ~RX_MODE_ENABLE;
5391 tw32_f(MAC_RX_MODE, tp->rx_mode);
5392 udelay(10);
5393
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005394 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5395 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5396 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5397 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5398 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5399 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005400
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005401 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5402 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5403 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5404 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5405 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5406 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5407 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408
5409 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5410 tw32_f(MAC_MODE, tp->mac_mode);
5411 udelay(40);
5412
5413 tp->tx_mode &= ~TX_MODE_ENABLE;
5414 tw32_f(MAC_TX_MODE, tp->tx_mode);
5415
5416 for (i = 0; i < MAX_WAIT_CNT; i++) {
5417 udelay(100);
5418 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5419 break;
5420 }
5421 if (i >= MAX_WAIT_CNT) {
5422 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5423 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5424 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005425 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426 }
5427
Michael Chane6de8ad2005-05-05 14:42:41 -07005428 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005429 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5430 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005431
5432 tw32(FTQ_RESET, 0xffffffff);
5433 tw32(FTQ_RESET, 0x00000000);
5434
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005435 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5436 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437
5438 if (tp->hw_status)
5439 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5440 if (tp->hw_stats)
5441 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5442
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443 return err;
5444}
5445
5446/* tp->lock is held. */
5447static int tg3_nvram_lock(struct tg3 *tp)
5448{
5449 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5450 int i;
5451
Michael Chanec41c7d2006-01-17 02:40:55 -08005452 if (tp->nvram_lock_cnt == 0) {
5453 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5454 for (i = 0; i < 8000; i++) {
5455 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5456 break;
5457 udelay(20);
5458 }
5459 if (i == 8000) {
5460 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5461 return -ENODEV;
5462 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005464 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 }
5466 return 0;
5467}
5468
5469/* tp->lock is held. */
5470static void tg3_nvram_unlock(struct tg3 *tp)
5471{
Michael Chanec41c7d2006-01-17 02:40:55 -08005472 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5473 if (tp->nvram_lock_cnt > 0)
5474 tp->nvram_lock_cnt--;
5475 if (tp->nvram_lock_cnt == 0)
5476 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5477 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005478}
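/* The nvram_lock_cnt handling above turns the hardware arbiter into a
 * recursive lock: only the outermost tg3_nvram_lock() issues
 * SWARB_REQ_SET1 and only the matching outermost tg3_nvram_unlock()
 * clears it, while nested callers just adjust the count.  A nested
 * sequence therefore behaves like this:
 *
 *	tg3_nvram_lock(tp);	cnt 0 -> 1, arbiter grabbed
 *	  tg3_nvram_lock(tp);	cnt 1 -> 2, no register access
 *	  tg3_nvram_unlock(tp);	cnt 2 -> 1, no register access
 *	tg3_nvram_unlock(tp);	cnt 1 -> 0, arbiter released
 */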
5479
5480/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005481static void tg3_enable_nvram_access(struct tg3 *tp)
5482{
5483 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5484 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5485 u32 nvaccess = tr32(NVRAM_ACCESS);
5486
5487 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5488 }
5489}
5490
5491/* tp->lock is held. */
5492static void tg3_disable_nvram_access(struct tg3 *tp)
5493{
5494 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5495 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5496 u32 nvaccess = tr32(NVRAM_ACCESS);
5497
5498 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5499 }
5500}
5501
Matt Carlson0d3031d2007-10-10 18:02:43 -07005502static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5503{
5504 int i;
5505 u32 apedata;
5506
5507 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5508 if (apedata != APE_SEG_SIG_MAGIC)
5509 return;
5510
5511 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
Matt Carlson731fd792008-08-15 14:07:51 -07005512 if (!(apedata & APE_FW_STATUS_READY))
Matt Carlson0d3031d2007-10-10 18:02:43 -07005513 return;
5514
5515 /* Wait for up to 1 millisecond for APE to service previous event. */
5516 for (i = 0; i < 10; i++) {
5517 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5518 return;
5519
5520 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5521
5522 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5523 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5524 event | APE_EVENT_STATUS_EVENT_PENDING);
5525
5526 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5527
5528 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5529 break;
5530
5531 udelay(100);
5532 }
5533
5534 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5535 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5536}
5537
5538static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5539{
5540 u32 event;
5541 u32 apedata;
5542
5543 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5544 return;
5545
5546 switch (kind) {
5547 case RESET_KIND_INIT:
5548 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5549 APE_HOST_SEG_SIG_MAGIC);
5550 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5551 APE_HOST_SEG_LEN_MAGIC);
5552 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5553 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5554 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5555 APE_HOST_DRIVER_ID_MAGIC);
5556 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5557 APE_HOST_BEHAV_NO_PHYLOCK);
5558
5559 event = APE_EVENT_STATUS_STATE_START;
5560 break;
5561 case RESET_KIND_SHUTDOWN:
5562 event = APE_EVENT_STATUS_STATE_UNLOAD;
5563 break;
5564 case RESET_KIND_SUSPEND:
5565 event = APE_EVENT_STATUS_STATE_SUSPEND;
5566 break;
5567 default:
5568 return;
5569 }
5570
5571 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5572
5573 tg3_ape_send_event(tp, event);
5574}
5575
Michael Chane6af3012005-04-21 17:12:05 -07005576/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005577static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5578{
David S. Millerf49639e2006-06-09 11:58:36 -07005579 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5580 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581
5582 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5583 switch (kind) {
5584 case RESET_KIND_INIT:
5585 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5586 DRV_STATE_START);
5587 break;
5588
5589 case RESET_KIND_SHUTDOWN:
5590 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5591 DRV_STATE_UNLOAD);
5592 break;
5593
5594 case RESET_KIND_SUSPEND:
5595 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5596 DRV_STATE_SUSPEND);
5597 break;
5598
5599 default:
5600 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005601 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005602 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005603
5604 if (kind == RESET_KIND_INIT ||
5605 kind == RESET_KIND_SUSPEND)
5606 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005607}
5608
5609/* tp->lock is held. */
5610static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5611{
5612 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5613 switch (kind) {
5614 case RESET_KIND_INIT:
5615 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5616 DRV_STATE_START_DONE);
5617 break;
5618
5619 case RESET_KIND_SHUTDOWN:
5620 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5621 DRV_STATE_UNLOAD_DONE);
5622 break;
5623
5624 default:
5625 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005627 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005628
5629 if (kind == RESET_KIND_SHUTDOWN)
5630 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631}
5632
5633/* tp->lock is held. */
5634static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5635{
5636 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5637 switch (kind) {
5638 case RESET_KIND_INIT:
5639 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5640 DRV_STATE_START);
5641 break;
5642
5643 case RESET_KIND_SHUTDOWN:
5644 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5645 DRV_STATE_UNLOAD);
5646 break;
5647
5648 case RESET_KIND_SUSPEND:
5649 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5650 DRV_STATE_SUSPEND);
5651 break;
5652
5653 default:
5654 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07005655 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656 }
5657}
5658
Michael Chan7a6f4362006-09-27 16:03:31 -07005659static int tg3_poll_fw(struct tg3 *tp)
5660{
5661 int i;
5662 u32 val;
5663
Michael Chanb5d37722006-09-27 16:06:21 -07005664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005665 /* Wait up to 20ms for init done. */
5666 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005667 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5668 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005669 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005670 }
5671 return -ENODEV;
5672 }
5673
Michael Chan7a6f4362006-09-27 16:03:31 -07005674 /* Wait for firmware initialization to complete. */
5675 for (i = 0; i < 100000; i++) {
5676 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5677 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5678 break;
5679 udelay(10);
5680 }
5681
5682 /* Chip might not be fitted with firmware. Some Sun onboard
5683 * parts are configured like that. So don't signal the timeout
5684 * of the above loop as an error, but do report the lack of
5685 * running firmware once.
5686 */
5687 if (i >= 100000 &&
5688 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5689 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5690
5691 printk(KERN_INFO PFX "%s: No firmware running.\n",
5692 tp->dev->name);
5693 }
5694
5695 return 0;
5696}
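/* The mailbox value polled above is the second half of a handshake:
 * before the reset, tg3_write_sig_pre_reset() writes
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 into the firmware mailbox, and the
 * bootcode writes back the one's complement of that value once it has
 * finished initializing, so tg3_poll_fw() spins until it reads
 * ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 (5906 parts have no such mailbox and
 * the VCPU_STATUS_INIT_DONE bit is polled instead).  A missing
 * handshake is reported once but not treated as fatal, since some Sun
 * onboard parts legitimately run without this firmware.
 */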
5697
Michael Chanee6a99b2007-07-18 21:49:10 -07005698/* Save PCI command register before chip reset */
5699static void tg3_save_pci_state(struct tg3 *tp)
5700{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005701 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005702}
5703
5704/* Restore PCI state after chip reset */
5705static void tg3_restore_pci_state(struct tg3 *tp)
5706{
5707 u32 val;
5708
5709 /* Re-enable indirect register accesses. */
5710 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5711 tp->misc_host_ctrl);
5712
5713 /* Set MAX PCI retry to zero. */
5714 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5715 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5716 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5717 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005718 /* Allow reads and writes to the APE register and memory space. */
5719 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5720 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5721 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005722 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5723
Matt Carlson8a6eac92007-10-21 16:17:55 -07005724 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005725
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005726 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5727 pcie_set_readrq(tp->pdev, 4096);
5728 else {
Michael Chan114342f2007-10-15 02:12:26 -07005729 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5730 tp->pci_cacheline_sz);
5731 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5732 tp->pci_lat_timer);
5733 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005734
Michael Chanee6a99b2007-07-18 21:49:10 -07005735 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005736 if (tp->pcix_cap) {
5737 u16 pcix_cmd;
5738
5739 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5740 &pcix_cmd);
5741 pcix_cmd &= ~PCI_X_CMD_ERO;
5742 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5743 pcix_cmd);
5744 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005745
5746 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005747
5748 /* Chip reset on 5780 will reset MSI enable bit,
5749 * so need to restore it.
5750 */
5751 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5752 u16 ctrl;
5753
5754 pci_read_config_word(tp->pdev,
5755 tp->msi_cap + PCI_MSI_FLAGS,
5756 &ctrl);
5757 pci_write_config_word(tp->pdev,
5758 tp->msi_cap + PCI_MSI_FLAGS,
5759 ctrl | PCI_MSI_FLAGS_ENABLE);
5760 val = tr32(MSGINT_MODE);
5761 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5762 }
5763 }
5764}
5765
Linus Torvalds1da177e2005-04-16 15:20:36 -07005766static void tg3_stop_fw(struct tg3 *);
5767
5768/* tp->lock is held. */
5769static int tg3_chip_reset(struct tg3 *tp)
5770{
5771 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005772 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005773 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005774
David S. Millerf49639e2006-06-09 11:58:36 -07005775 tg3_nvram_lock(tp);
5776
Matt Carlson158d7ab2008-05-29 01:37:54 -07005777 tg3_mdio_stop(tp);
5778
Matt Carlson77b483f2008-08-15 14:07:24 -07005779 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5780
David S. Millerf49639e2006-06-09 11:58:36 -07005781 /* No matching tg3_nvram_unlock() after this because
5782 * chip reset below will undo the nvram lock.
5783 */
5784 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005785
Michael Chanee6a99b2007-07-18 21:49:10 -07005786 /* GRC_MISC_CFG core clock reset will clear the memory
5787 * enable bit in PCI register 4 and the MSI enable bit
5788 * on some chips, so we save relevant registers here.
5789 */
5790 tg3_save_pci_state(tp);
5791
Michael Chand9ab5ad2006-03-20 22:27:35 -08005792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07005796 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005798 tw32(GRC_FASTBOOT_PC, 0);
5799
Linus Torvalds1da177e2005-04-16 15:20:36 -07005800 /*
5801 * We must avoid the readl() that normally takes place.
5802 * It locks machines, causes machine checks, and other
5803 * fun things. So, temporarily disable the 5701
5804 * hardware workaround, while we do the reset.
5805 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005806 write_op = tp->write32;
5807 if (write_op == tg3_write_flush_reg32)
5808 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005809
Michael Chand18edcb2007-03-24 20:57:11 -07005810 /* Prevent the irq handler from reading or writing PCI registers
5811 * during chip reset when the memory enable bit in the PCI command
5812 * register may be cleared. The chip does not generate interrupt
5813 * at this time, but the irq handler may still be called due to irq
5814 * sharing or irqpoll.
5815 */
5816 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005817 if (tp->hw_status) {
5818 tp->hw_status->status = 0;
5819 tp->hw_status->status_tag = 0;
5820 }
Michael Chand18edcb2007-03-24 20:57:11 -07005821 tp->last_tag = 0;
5822 smp_mb();
5823 synchronize_irq(tp->pdev->irq);
5824
Linus Torvalds1da177e2005-04-16 15:20:36 -07005825 /* do the reset */
5826 val = GRC_MISC_CFG_CORECLK_RESET;
5827
5828 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5829 if (tr32(0x7e2c) == 0x60) {
5830 tw32(0x7e2c, 0x20);
5831 }
5832 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5833 tw32(GRC_MISC_CFG, (1 << 29));
5834 val |= (1 << 29);
5835 }
5836 }
5837
Michael Chanb5d37722006-09-27 16:06:21 -07005838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5839 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5840 tw32(GRC_VCPU_EXT_CTRL,
5841 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5842 }
5843
Linus Torvalds1da177e2005-04-16 15:20:36 -07005844 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5845 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5846 tw32(GRC_MISC_CFG, val);
5847
Michael Chan1ee582d2005-08-09 20:16:46 -07005848 /* restore 5701 hardware bug workaround write method */
5849 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850
5851 /* Unfortunately, we have to delay before the PCI read back.
5852 * Some 575X chips even will not respond to a PCI cfg access
5853 * when the reset command is given to the chip.
5854 *
5855 * How do these hardware designers expect things to work
5856 * properly if the PCI write is posted for a long period
5857 * of time? It is always necessary to have some method by
5858 * which a register read back can occur to push the write
5859 * out which does the reset.
5860 *
5861 * For most tg3 variants the trick below was working.
5862 * Ho hum...
5863 */
5864 udelay(120);
5865
5866 /* Flush PCI posted writes. The normal MMIO registers
5867 * are inaccessible at this time so this is the only
5868 * way to do this reliably (actually, this is no longer
5869 * the case, see above). I tried to use indirect
5870 * register read/write but this upset some 5701 variants.
5871 */
5872 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5873
5874 udelay(120);
5875
5876 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5877 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5878 int i;
5879 u32 cfg_val;
5880
5881 /* Wait for link training to complete. */
5882 for (i = 0; i < 5000; i++)
5883 udelay(100);
5884
5885 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5886 pci_write_config_dword(tp->pdev, 0xc4,
5887 cfg_val | (1 << 15));
5888 }
5889 /* Set PCIE max payload size and clear error status. */
5890 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5891 }
5892
Michael Chanee6a99b2007-07-18 21:49:10 -07005893 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005894
Michael Chand18edcb2007-03-24 20:57:11 -07005895 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5896
Michael Chanee6a99b2007-07-18 21:49:10 -07005897 val = 0;
5898 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07005899 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07005900 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005901
5902 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5903 tg3_stop_fw(tp);
5904 tw32(0x5000, 0x400);
5905 }
5906
5907 tw32(GRC_MODE, tp->grc_mode);
5908
5909 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005910 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005911
5912 tw32(0xc4, val | (1 << 15));
5913 }
5914
5915 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5916 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5917 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5918 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5919 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5920 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5921 }
5922
5923 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5924 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5925 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07005926 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5927 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5928 tw32_f(MAC_MODE, tp->mac_mode);
Matt Carlson3bda1252008-08-15 14:08:22 -07005929 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
5930 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
5931 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
5932 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
5933 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005934 } else
5935 tw32_f(MAC_MODE, 0);
5936 udelay(40);
5937
Matt Carlson158d7ab2008-05-29 01:37:54 -07005938 tg3_mdio_start(tp);
5939
Matt Carlson77b483f2008-08-15 14:07:24 -07005940 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
5941
Michael Chan7a6f4362006-09-27 16:03:31 -07005942 err = tg3_poll_fw(tp);
5943 if (err)
5944 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945
5946 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5947 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005948 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949
5950 tw32(0x7c00, val | (1 << 25));
5951 }
5952
5953 /* Reprobe ASF enable state. */
5954 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5955 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5956 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5957 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5958 u32 nic_cfg;
5959
5960 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5961 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5962 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
Matt Carlson4ba526c2008-08-15 14:10:04 -07005963 tp->last_event_jiffies = jiffies;
John W. Linvillecbf46852005-04-21 17:01:29 -07005964 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005965 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5966 }
5967 }
5968
5969 return 0;
5970}
5971
5972/* tp->lock is held. */
5973static void tg3_stop_fw(struct tg3 *tp)
5974{
Matt Carlson0d3031d2007-10-10 18:02:43 -07005975 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5976 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07005977 /* Wait for RX cpu to ACK the previous event. */
5978 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005979
5980 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
Matt Carlson4ba526c2008-08-15 14:10:04 -07005981
5982 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005983
Matt Carlson7c5026a2008-05-02 16:49:29 -07005984 /* Wait for RX cpu to ACK this event. */
5985 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005986 }
5987}
5988
5989/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07005990static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005991{
5992 int err;
5993
5994 tg3_stop_fw(tp);
5995
Michael Chan944d9802005-05-29 14:57:48 -07005996 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005997
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005998 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005999 err = tg3_chip_reset(tp);
6000
Michael Chan944d9802005-05-29 14:57:48 -07006001 tg3_write_sig_legacy(tp, kind);
6002 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003
6004 if (err)
6005 return err;
6006
6007 return 0;
6008}
6009
6010#define TG3_FW_RELEASE_MAJOR 0x0
6011#define TG3_FW_RELASE_MINOR 0x0
6012#define TG3_FW_RELEASE_FIX 0x0
6013#define TG3_FW_START_ADDR 0x08000000
6014#define TG3_FW_TEXT_ADDR 0x08000000
6015#define TG3_FW_TEXT_LEN 0x9c0
6016#define TG3_FW_RODATA_ADDR 0x080009c0
6017#define TG3_FW_RODATA_LEN 0x60
6018#define TG3_FW_DATA_ADDR 0x08000a40
6019#define TG3_FW_DATA_LEN 0x20
6020#define TG3_FW_SBSS_ADDR 0x08000a60
6021#define TG3_FW_SBSS_LEN 0xc
6022#define TG3_FW_BSS_ADDR 0x08000a70
6023#define TG3_FW_BSS_LEN 0x10
6024
Andreas Mohr50da8592006-08-14 23:54:30 -07006025static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006026 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6027 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6028 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6029 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6030 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6031 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6032 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6033 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6034 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6035 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6036 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6037 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6038 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6039 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6040 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6041 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6042 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6043 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6044 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6045 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6046 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6047 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6048 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6049 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6050 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6051 0, 0, 0, 0, 0, 0,
6052 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6053 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6054 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6055 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6056 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6057 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6058 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6059 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6060 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6061 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6062 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6063 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6064 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6065 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6066 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6067 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6068 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6069 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6070 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6071 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6072 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6073 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6074 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6075 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6076 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6077 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6078 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6079 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6080 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6081 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6082 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6083 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6084 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6085 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6086 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6087 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6088 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6089 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6090 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6091 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6092 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6093 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6094 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6095 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6096 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6097 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6098 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6099 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6100 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6101 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6102 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6103 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6104 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6105 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6106 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6107 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6108 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6109 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6110 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6111 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6112 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6113 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6114 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6115 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6116 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6117};
6118
Andreas Mohr50da8592006-08-14 23:54:30 -07006119static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006120 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6121 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6122 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6123 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6124 0x00000000
6125};
6126
6127#if 0 /* All zeros, don't eat up space with it. */
6128u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6129 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6130 0x00000000, 0x00000000, 0x00000000, 0x00000000
6131};
6132#endif
6133
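/* On-chip scratch memory windows used to hold the firmware images; each of
 * the RX and TX CPUs gets a 16 KB window.
 */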
6134#define RX_CPU_SCRATCH_BASE 0x30000
6135#define RX_CPU_SCRATCH_SIZE 0x04000
6136#define TX_CPU_SCRATCH_BASE 0x34000
6137#define TX_CPU_SCRATCH_SIZE 0x04000
6138
6139/* tp->lock is held. */
6140static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6141{
6142 int i;
6143
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02006144 BUG_ON(offset == TX_CPU_BASE &&
6145 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006146
Michael Chanb5d37722006-09-27 16:06:21 -07006147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6148 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6149
6150 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6151 return 0;
6152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006153 if (offset == RX_CPU_BASE) {
6154 for (i = 0; i < 10000; i++) {
6155 tw32(offset + CPU_STATE, 0xffffffff);
6156 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6157 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6158 break;
6159 }
6160
6161 tw32(offset + CPU_STATE, 0xffffffff);
6162 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6163 udelay(10);
6164 } else {
6165 for (i = 0; i < 10000; i++) {
6166 tw32(offset + CPU_STATE, 0xffffffff);
6167 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6168 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6169 break;
6170 }
6171 }
6172
6173 if (i >= 10000) {
6174		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
6175 "and %s CPU\n",
6176 tp->dev->name,
6177 (offset == RX_CPU_BASE ? "RX" : "TX"));
6178 return -ENODEV;
6179 }
Michael Chanec41c7d2006-01-17 02:40:55 -08006180
6181 /* Clear firmware's nvram arbitration. */
6182 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6183 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006184 return 0;
6185}
6186
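/* Describes one embedded firmware image: load address, length and contents
 * of each section.  A NULL section pointer means the section is zero-filled
 * when the image is loaded (see tg3_load_firmware_cpu()).
 */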
6187struct fw_info {
6188 unsigned int text_base;
6189 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006190 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191 unsigned int rodata_base;
6192 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006193 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006194 unsigned int data_base;
6195 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07006196 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006197};
6198
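/* Copy one firmware image into the given CPU's scratch memory: take the
 * NVRAM lock (bootcode may still be running), halt the CPU, zero the whole
 * scratch window and then write the text, rodata and data sections word by
 * word.
 */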
6199/* tp->lock is held. */
6200static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6201 int cpu_scratch_size, struct fw_info *info)
6202{
Michael Chanec41c7d2006-01-17 02:40:55 -08006203 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204 void (*write_op)(struct tg3 *, u32, u32);
6205
6206 if (cpu_base == TX_CPU_BASE &&
6207 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6208 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6209 "TX cpu firmware on %s which is 5705.\n",
6210 tp->dev->name);
6211 return -EINVAL;
6212 }
6213
6214 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6215 write_op = tg3_write_mem;
6216 else
6217 write_op = tg3_write_indirect_reg32;
6218
Michael Chan1b628152005-05-29 14:59:49 -07006219 /* It is possible that bootcode is still loading at this point.
6220 * Get the nvram lock first before halting the cpu.
6221 */
Michael Chanec41c7d2006-01-17 02:40:55 -08006222 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006223 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08006224 if (!lock_err)
6225 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006226 if (err)
6227 goto out;
6228
6229 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6230 write_op(tp, cpu_scratch_base + i, 0);
6231 tw32(cpu_base + CPU_STATE, 0xffffffff);
6232 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6233 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6234 write_op(tp, (cpu_scratch_base +
6235 (info->text_base & 0xffff) +
6236 (i * sizeof(u32))),
6237 (info->text_data ?
6238 info->text_data[i] : 0));
6239 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6240 write_op(tp, (cpu_scratch_base +
6241 (info->rodata_base & 0xffff) +
6242 (i * sizeof(u32))),
6243 (info->rodata_data ?
6244 info->rodata_data[i] : 0));
6245 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6246 write_op(tp, (cpu_scratch_base +
6247 (info->data_base & 0xffff) +
6248 (i * sizeof(u32))),
6249 (info->data_data ?
6250 info->data_data[i] : 0));
6251
6252 err = 0;
6253
6254out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006255 return err;
6256}
6257
6258/* tp->lock is held. */
6259static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6260{
6261 struct fw_info info;
6262 int err, i;
6263
6264 info.text_base = TG3_FW_TEXT_ADDR;
6265 info.text_len = TG3_FW_TEXT_LEN;
6266 info.text_data = &tg3FwText[0];
6267 info.rodata_base = TG3_FW_RODATA_ADDR;
6268 info.rodata_len = TG3_FW_RODATA_LEN;
6269 info.rodata_data = &tg3FwRodata[0];
6270 info.data_base = TG3_FW_DATA_ADDR;
6271 info.data_len = TG3_FW_DATA_LEN;
6272 info.data_data = NULL;
6273
6274 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6275 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6276 &info);
6277 if (err)
6278 return err;
6279
6280 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6281 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6282 &info);
6283 if (err)
6284 return err;
6285
6286 /* Now startup only the RX cpu. */
6287 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6288 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6289
6290 for (i = 0; i < 5; i++) {
6291 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6292 break;
6293 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6294 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6295 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6296 udelay(1000);
6297 }
6298 if (i >= 5) {
6299 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6300 "to set RX CPU PC, is %08x should be %08x\n",
6301 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6302 TG3_FW_TEXT_ADDR);
6303 return -ENODEV;
6304 }
6305 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6306 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6307
6308 return 0;
6309}
6310
Linus Torvalds1da177e2005-04-16 15:20:36 -07006311
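/* Layout of the two embedded TSO firmware images.  The full image below is
 * run on the TX CPU of chips without hardware TSO; the smaller 5705-specific
 * image further down is run on the RX CPU instead (see
 * tg3_load_tso_firmware()).
 */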
6312#define TG3_TSO_FW_RELEASE_MAJOR 0x1
6313#define TG3_TSO_FW_RELEASE_MINOR	0x6
6314#define TG3_TSO_FW_RELEASE_FIX 0x0
6315#define TG3_TSO_FW_START_ADDR 0x08000000
6316#define TG3_TSO_FW_TEXT_ADDR 0x08000000
6317#define TG3_TSO_FW_TEXT_LEN 0x1aa0
6318#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6319#define TG3_TSO_FW_RODATA_LEN 0x60
6320#define TG3_TSO_FW_DATA_ADDR 0x08001b20
6321#define TG3_TSO_FW_DATA_LEN 0x30
6322#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6323#define TG3_TSO_FW_SBSS_LEN 0x2c
6324#define TG3_TSO_FW_BSS_ADDR 0x08001b80
6325#define TG3_TSO_FW_BSS_LEN 0x894
6326
Andreas Mohr50da8592006-08-14 23:54:30 -07006327static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006328 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6329 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6330 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6331 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6332 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6333 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6334 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6335 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6336 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6337 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6338 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6339 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6340 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6341 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6342 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6343 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6344 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6345 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6346 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6347 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6348 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6349 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6350 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6351 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6352 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6353 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6354 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6355 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6356 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6357 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6358 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6359 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6360 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6361 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6362 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6363 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6364 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6365 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6366 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6367 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6368 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6369 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6370 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6371 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6372 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6373 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6374 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6375 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6376 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6377 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6378 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6379 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6380 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6381 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6382 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6383 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6384 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6385 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6386 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6387 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6388 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6389 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6390 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6391 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6392 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6393 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6394 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6395 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6396 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6397 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6398 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6399 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6400 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6401 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6402 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6403 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6404 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6405 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6406 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6407 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6408 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6409 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6410 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6411 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6412 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6413 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6414 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6415 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6416 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6417 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6418 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6419 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6420 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6421 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6422 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6423 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6424 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6425 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6426 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6427 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6428 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6429 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6430 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6431 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6432 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6433 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6434 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6435 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6436 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6437 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6438 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6439 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6440 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6441 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6442 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6443 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6444 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6445 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6446 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6447 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6448 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6449 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6450 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6451 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6452 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6453 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6454 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6455 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6456 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6457 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6458 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6459 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6460 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6461 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6462 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6463 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6464 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6465 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6466 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6467 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6468 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6469 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6470 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6471 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6472 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6473 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6474 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6475 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6476 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6477 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6478 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6479 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6480 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6481 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6482 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6483 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6484 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6485 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6486 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6487 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6488 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6489 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6490 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6491 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6492 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6493 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6494 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6495 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6496 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6497 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6498 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6499 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6500 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6501 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6502 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6503 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6504 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6505 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6506 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6507 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6508 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6509 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6510 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6511 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6512 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6513 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6514 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6515 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6516 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6517 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6518 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6519 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6520 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6521 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6522 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6523 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6524 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6525 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6526 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6527 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6528 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6529 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6530 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6531 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6532 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6533 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6534 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6535 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6536 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6537 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6538 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6539 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6540 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6541 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6542 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6543 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6544 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6545 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6546 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6547 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6548 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6549 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6550 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6551 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6552 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6553 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6554 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6555 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6556 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6557 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6558 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6559 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6560 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6561 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6562 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6563 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6564 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6565 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6566 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6567 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6568 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6569 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6570 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6571 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6572 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6573 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6574 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6575 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6576 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6577 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6578 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6579 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6580 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6581 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6582 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6583 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6584 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6585 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6586 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6587 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6588 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6589 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6590 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6591 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6592 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6593 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6594 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6595 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6596 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6597 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6598 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6599 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6600 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6601 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6602 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6603 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6604 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6605 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6606 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6607 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6608 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6609 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6610 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6611 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6612};
6613
Andreas Mohr50da8592006-08-14 23:54:30 -07006614static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006615 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6616 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6617 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6618 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6619 0x00000000,
6620};
6621
Andreas Mohr50da8592006-08-14 23:54:30 -07006622static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006623 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6624 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6625 0x00000000,
6626};
6627
6628/* 5705 needs a special version of the TSO firmware. */
6629#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6630#define TG3_TSO5_FW_RELASE_MINOR 0x2
6631#define TG3_TSO5_FW_RELEASE_FIX 0x0
6632#define TG3_TSO5_FW_START_ADDR 0x00010000
6633#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6634#define TG3_TSO5_FW_TEXT_LEN 0xe90
6635#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6636#define TG3_TSO5_FW_RODATA_LEN 0x50
6637#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6638#define TG3_TSO5_FW_DATA_LEN 0x20
6639#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6640#define TG3_TSO5_FW_SBSS_LEN 0x28
6641#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6642#define TG3_TSO5_FW_BSS_LEN 0x88
6643
Andreas Mohr50da8592006-08-14 23:54:30 -07006644static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006645 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6646 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6647 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6648 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6649 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6650 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6651 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6652 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6653 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6654 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6655 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6656 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6657 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6658 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6659 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6660 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6661 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6662 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6663 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6664 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6665 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6666 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6667 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6668 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6669 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6670 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6671 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6672 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6673 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6674 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6675 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6676 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6677 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6678 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6679 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6680 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6681 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6682 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6683 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6684 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6685 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6686 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6687 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6688 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6689 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6690 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6691 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6692 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6693 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6694 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6695 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6696 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6697 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6698 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6699 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6700 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6701 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6702 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6703 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6704 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6705 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6706 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6707 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6708 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6709 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6710 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6711 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6712 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6713 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6714 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6715 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6716 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6717 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6718 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6719 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6720 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6721 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6722 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6723 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6724 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6725 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6726 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6727 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6728 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6729 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6730 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6731 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6732 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6733 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6734 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6735 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6736 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6737 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6738 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6739 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6740 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6741 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6742 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6743 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6744 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6745 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6746 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6747 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6748 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6749 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6750 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6751 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6752 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6753 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6754 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6755 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6756 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6757 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6758 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6759 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6760 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6761 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6762 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6763 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6764 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6765 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6766 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6767 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6768 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6769 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6770 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6771 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6772 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6773 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6774 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6775 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6776 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6777 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6778 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6779 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6780 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6781 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6782 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6783 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6784 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6785 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6786 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6787 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6788 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6789 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6790 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6791 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6792 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6793 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6794 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6795 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6796 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6797 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6798 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6799 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6800 0x00000000, 0x00000000, 0x00000000,
6801};
6802
Andreas Mohr50da8592006-08-14 23:54:30 -07006803static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006804 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6805 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6806 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6807 0x00000000, 0x00000000, 0x00000000,
6808};
6809
Andreas Mohr50da8592006-08-14 23:54:30 -07006810static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006811 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6812 0x00000000, 0x00000000, 0x00000000,
6813};
6814
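/* Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware at all.  The
 * 5705 loads its special image into the RX CPU, reusing the NIC mbuf pool
 * as scratch space; everything else loads the full image into the TX CPU
 * scratch window.
 */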
6815/* tp->lock is held. */
6816static int tg3_load_tso_firmware(struct tg3 *tp)
6817{
6818 struct fw_info info;
6819 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6820 int err, i;
6821
6822 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6823 return 0;
6824
6825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6826 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6827 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6828 info.text_data = &tg3Tso5FwText[0];
6829 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6830 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6831 info.rodata_data = &tg3Tso5FwRodata[0];
6832 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6833 info.data_len = TG3_TSO5_FW_DATA_LEN;
6834 info.data_data = &tg3Tso5FwData[0];
6835 cpu_base = RX_CPU_BASE;
6836 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6837 cpu_scratch_size = (info.text_len +
6838 info.rodata_len +
6839 info.data_len +
6840 TG3_TSO5_FW_SBSS_LEN +
6841 TG3_TSO5_FW_BSS_LEN);
6842 } else {
6843 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6844 info.text_len = TG3_TSO_FW_TEXT_LEN;
6845 info.text_data = &tg3TsoFwText[0];
6846 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6847 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6848 info.rodata_data = &tg3TsoFwRodata[0];
6849 info.data_base = TG3_TSO_FW_DATA_ADDR;
6850 info.data_len = TG3_TSO_FW_DATA_LEN;
6851 info.data_data = &tg3TsoFwData[0];
6852 cpu_base = TX_CPU_BASE;
6853 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6854 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6855 }
6856
6857 err = tg3_load_firmware_cpu(tp, cpu_base,
6858 cpu_scratch_base, cpu_scratch_size,
6859 &info);
6860 if (err)
6861 return err;
6862
6863 /* Now startup the cpu. */
6864 tw32(cpu_base + CPU_STATE, 0xffffffff);
6865 tw32_f(cpu_base + CPU_PC, info.text_base);
6866
6867 for (i = 0; i < 5; i++) {
6868 if (tr32(cpu_base + CPU_PC) == info.text_base)
6869 break;
6870 tw32(cpu_base + CPU_STATE, 0xffffffff);
6871 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6872 tw32_f(cpu_base + CPU_PC, info.text_base);
6873 udelay(1000);
6874 }
6875 if (i >= 5) {
6876 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6877 "to set CPU PC, is %08x should be %08x\n",
6878 tp->dev->name, tr32(cpu_base + CPU_PC),
6879 info.text_base);
6880 return -ENODEV;
6881 }
6882 tw32(cpu_base + CPU_STATE, 0xffffffff);
6883 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6884 return 0;
6885}
6886
Linus Torvalds1da177e2005-04-16 15:20:36 -07006887
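/* Program the station address into the four MAC_ADDR_{0..3} register pairs
 * (HIGH holds bytes 0-1, LOW holds bytes 2-5), optionally leaving slot 1
 * alone when ASF firmware owns it, mirror it into the twelve extended
 * address slots on 5703/5704, and seed the TX backoff generator from the
 * byte sum of the address.
 */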
6888/* tp->lock is held. */
Michael Chan986e0ae2007-05-05 12:10:20 -07006889static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006890{
6891 u32 addr_high, addr_low;
6892 int i;
6893
6894 addr_high = ((tp->dev->dev_addr[0] << 8) |
6895 tp->dev->dev_addr[1]);
6896 addr_low = ((tp->dev->dev_addr[2] << 24) |
6897 (tp->dev->dev_addr[3] << 16) |
6898 (tp->dev->dev_addr[4] << 8) |
6899 (tp->dev->dev_addr[5] << 0));
6900 for (i = 0; i < 4; i++) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006901 if (i == 1 && skip_mac_1)
6902 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006903 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6904 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6905 }
6906
6907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6909 for (i = 0; i < 12; i++) {
6910 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6911 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6912 }
6913 }
6914
6915 addr_high = (tp->dev->dev_addr[0] +
6916 tp->dev->dev_addr[1] +
6917 tp->dev->dev_addr[2] +
6918 tp->dev->dev_addr[3] +
6919 tp->dev->dev_addr[4] +
6920 tp->dev->dev_addr[5]) &
6921 TX_BACKOFF_SEED_MASK;
6922 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6923}
6924
6925static int tg3_set_mac_addr(struct net_device *dev, void *p)
6926{
6927 struct tg3 *tp = netdev_priv(dev);
6928 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07006929 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930
Michael Chanf9804dd2005-09-27 12:13:10 -07006931 if (!is_valid_ether_addr(addr->sa_data))
6932 return -EINVAL;
6933
Linus Torvalds1da177e2005-04-16 15:20:36 -07006934 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6935
Michael Chane75f7c92006-03-20 21:33:26 -08006936 if (!netif_running(dev))
6937 return 0;
6938
Michael Chan58712ef2006-04-29 18:58:01 -07006939 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006940 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07006941
Michael Chan986e0ae2007-05-05 12:10:20 -07006942 addr0_high = tr32(MAC_ADDR_0_HIGH);
6943 addr0_low = tr32(MAC_ADDR_0_LOW);
6944 addr1_high = tr32(MAC_ADDR_1_HIGH);
6945 addr1_low = tr32(MAC_ADDR_1_LOW);
6946
6947 /* Skip MAC addr 1 if ASF is using it. */
6948 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6949 !(addr1_high == 0 && addr1_low == 0))
6950 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07006951 }
Michael Chan986e0ae2007-05-05 12:10:20 -07006952 spin_lock_bh(&tp->lock);
6953 __tg3_set_mac_addr(tp, skip_mac_1);
6954 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006955
Michael Chanb9ec6c12006-07-25 16:37:27 -07006956 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006957}
6958
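/* Fill in one buffer descriptor ring control block in NIC SRAM: the 64-bit
 * host DMA address split into high and low words, the maxlen/flags word
 * and, on chips without the 5705_PLUS flag, the ring's address in NIC-local
 * memory.
 */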
6959/* tp->lock is held. */
6960static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6961 dma_addr_t mapping, u32 maxlen_flags,
6962 u32 nic_addr)
6963{
6964 tg3_write_mem(tp,
6965 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6966 ((u64) mapping >> 32));
6967 tg3_write_mem(tp,
6968 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6969 ((u64) mapping & 0xffffffff));
6970 tg3_write_mem(tp,
6971 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6972 maxlen_flags);
6973
6974 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6975 tg3_write_mem(tp,
6976 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6977 nic_addr);
6978}
6979
6980static void __tg3_set_rx_mode(struct net_device *);
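/* Load the ethtool coalescing parameters into the host coalescing engine.
 * The per-interrupt tick values and the statistics block interval exist
 * only on pre-5705 hardware, and the statistics interval is forced to zero
 * while the link is down.
 */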
Michael Chand244c892005-07-05 14:42:33 -07006981static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07006982{
6983 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6984 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6985 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6986 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6987 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6988 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6989 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6990 }
6991 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6992 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6993 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6994 u32 val = ec->stats_block_coalesce_usecs;
6995
6996 if (!netif_carrier_ok(tp->dev))
6997 val = 0;
6998
6999 tw32(HOSTCC_STAT_COAL_TICKS, val);
7000 }
7001}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007002
7003/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007004static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007005{
7006 u32 val, rdmac_mode;
7007 int i, err, limit;
7008
7009 tg3_disable_ints(tp);
7010
7011 tg3_stop_fw(tp);
7012
7013 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7014
7015 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07007016 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007017 }
7018
Matt Carlsondd477002008-05-25 23:45:58 -07007019 if (reset_phy &&
7020 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
Michael Chand4d2c552006-03-20 17:47:20 -08007021 tg3_phy_reset(tp);
7022
Linus Torvalds1da177e2005-04-16 15:20:36 -07007023 err = tg3_chip_reset(tp);
7024 if (err)
7025 return err;
7026
7027 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7028
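	/* On 5784 A0/A1, switch off the CPMU link-aware and link-idle power
	 * modes and pin the 10Mb, link-aware and host-access MAC clocks at
	 * 6.25 MHz, presumably to work around early-silicon clocking issues.
	 */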
Matt Carlsonb5af7122007-11-12 21:22:02 -08007029 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7030 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007031 val = tr32(TG3_CPMU_CTRL);
7032 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7033 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08007034
7035 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7036 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7037 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7038 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7039
7040 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7041 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7042 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7043 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7044
7045 val = tr32(TG3_CPMU_HST_ACC);
7046 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7047 val |= CPMU_HST_ACC_MACCLK_6_25;
7048 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07007049 }
7050
Linus Torvalds1da177e2005-04-16 15:20:36 -07007051 /* This works around an issue with Athlon chipsets on
7052 * B3 tigon3 silicon. This bit has no effect on any
7053 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07007054 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007055 */
Matt Carlson795d01c2007-10-07 23:28:17 -07007056 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7057 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7058 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7059 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7060 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007061
7062 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7063 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7064 val = tr32(TG3PCI_PCISTATE);
7065 val |= PCISTATE_RETRY_SAME_DMA;
7066 tw32(TG3PCI_PCISTATE, val);
7067 }
7068
Matt Carlson0d3031d2007-10-10 18:02:43 -07007069 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7070 /* Allow reads and writes to the
7071 * APE register and memory space.
7072 */
7073 val = tr32(TG3PCI_PCISTATE);
7074 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7075 PCISTATE_ALLOW_APE_SHMEM_WR;
7076 tw32(TG3PCI_PCISTATE, val);
7077 }
7078
Linus Torvalds1da177e2005-04-16 15:20:36 -07007079 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7080 /* Enable some hw fixes. */
7081 val = tr32(TG3PCI_MSI_DATA);
7082 val |= (1 << 26) | (1 << 28) | (1 << 29);
7083 tw32(TG3PCI_MSI_DATA, val);
7084 }
7085
7086 /* Descriptor ring init may make accesses to the
7087	 * NIC SRAM area to set up the TX descriptors, so we
7088 * can only do this after the hardware has been
7089 * successfully reset.
7090 */
Michael Chan32d8c572006-07-25 16:38:29 -07007091 err = tg3_init_rings(tp);
7092 if (err)
7093 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007094
Matt Carlson9936bcf2007-10-10 18:03:07 -07007095 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlson57e69832008-05-25 23:48:31 -07007096 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7097 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07007098 /* This value is determined during the probe time DMA
7099 * engine test, tg3_test_dma.
7100 */
7101 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7102 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007103
7104 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7105 GRC_MODE_4X_NIC_SEND_RINGS |
7106 GRC_MODE_NO_TX_PHDR_CSUM |
7107 GRC_MODE_NO_RX_PHDR_CSUM);
7108 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07007109
7110 /* Pseudo-header checksum is done by hardware logic and not
7111	 * the offload processors, so make the chip do the pseudo-
7112 * header checksums on receive. For transmit it is more
7113 * convenient to do the pseudo-header checksum in software
7114 * as Linux does that on transmit for us in all cases.
7115 */
7116 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117
7118 tw32(GRC_MODE,
7119 tp->grc_mode |
7120 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7121
7122	/* Set up the timer prescaler register. The clock is always 66 MHz. */
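	/* A prescaler value of N divides the core clock by N + 1, so 65
	 * presumably turns the 66MHz clock into the 1MHz (1 usec) tick
	 * the timer logic expects.
	 */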
7123 val = tr32(GRC_MISC_CFG);
7124 val &= ~0xff;
7125 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7126 tw32(GRC_MISC_CFG, val);
7127
7128 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07007129 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130 /* Do nothing. */
7131 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7132 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7134 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7135 else
7136 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7137 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7138 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7139 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007140 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7141 int fw_len;
7142
7143 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7144 TG3_TSO5_FW_RODATA_LEN +
7145 TG3_TSO5_FW_DATA_LEN +
7146 TG3_TSO5_FW_SBSS_LEN +
7147 TG3_TSO5_FW_BSS_LEN);
7148 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
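		/* The 5705 TSO firmware image sits at the bottom of the
		 * mbuf pool SRAM; round its footprint up to a 128-byte
		 * boundary and carve it, plus an extra 0xa00 bytes
		 * (presumably firmware scratch space), out of the pool.
		 */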
7149 tw32(BUFMGR_MB_POOL_ADDR,
7150 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7151 tw32(BUFMGR_MB_POOL_SIZE,
7152 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007154
Michael Chan0f893dc2005-07-25 12:30:38 -07007155 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7157 tp->bufmgr_config.mbuf_read_dma_low_water);
7158 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7159 tp->bufmgr_config.mbuf_mac_rx_low_water);
7160 tw32(BUFMGR_MB_HIGH_WATER,
7161 tp->bufmgr_config.mbuf_high_water);
7162 } else {
7163 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7164 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7165 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7166 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7167 tw32(BUFMGR_MB_HIGH_WATER,
7168 tp->bufmgr_config.mbuf_high_water_jumbo);
7169 }
7170 tw32(BUFMGR_DMA_LOW_WATER,
7171 tp->bufmgr_config.dma_low_water);
7172 tw32(BUFMGR_DMA_HIGH_WATER,
7173 tp->bufmgr_config.dma_high_water);
7174
7175 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
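	/* Wait up to ~20ms (2000 x 10us) for the buffer manager to report
	 * itself enabled before giving up.
	 */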
7176 for (i = 0; i < 2000; i++) {
7177 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7178 break;
7179 udelay(10);
7180 }
7181 if (i >= 2000) {
7182 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7183 tp->dev->name);
7184 return -ENODEV;
7185 }
7186
7187 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07007188 val = tp->rx_pending / 8;
7189 if (val == 0)
7190 val = 1;
7191 else if (val > tp->rx_std_max_post)
7192 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07007193 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7194 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7195 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7196
7197 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7198 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7199 }
Michael Chanf92905d2006-06-29 20:14:29 -07007200
7201 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202
7203 /* Initialize TG3_BDINFO's at:
7204 * RCVDBDI_STD_BD: standard eth size rx ring
7205 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7206 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7207 *
7208 * like so:
7209 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7210 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7211 * ring attribute flags
7212 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7213 *
7214 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7215 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7216 *
7217 * The size of each ring is fixed in the firmware, but the location is
7218 * configurable.
7219 */
7220 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7221 ((u64) tp->rx_std_mapping >> 32));
7222 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7223 ((u64) tp->rx_std_mapping & 0xffffffff));
7224 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7225 NIC_SRAM_RX_BUFFER_DESC);
7226
7227 /* Don't even try to program the JUMBO/MINI buffer descriptor
7228 * configs on 5705.
7229 */
7230 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7231 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7232 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7233 } else {
7234 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7235 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7236
7237 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7238 BDINFO_FLAGS_DISABLED);
7239
7240 /* Setup replenish threshold. */
7241 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7242
Michael Chan0f893dc2005-07-25 12:30:38 -07007243 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007244 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7245 ((u64) tp->rx_jumbo_mapping >> 32));
7246 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7247 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7248 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7249 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7250 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7251 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7252 } else {
7253 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7254 BDINFO_FLAGS_DISABLED);
7255 }
7256
7257 }
7258
7259 /* There is only one send ring on 5705/5750, no need to explicitly
7260 * disable the others.
7261 */
7262 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7263 /* Clear out send RCB ring in SRAM. */
7264 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7265 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7266 BDINFO_FLAGS_DISABLED);
7267 }
7268
7269 tp->tx_prod = 0;
7270 tp->tx_cons = 0;
7271 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7272 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7273
7274 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7275 tp->tx_desc_mapping,
7276 (TG3_TX_RING_SIZE <<
7277 BDINFO_FLAGS_MAXLEN_SHIFT),
7278 NIC_SRAM_TX_BUFFER_DESC);
7279
7280 /* There is only one receive return ring on 5705/5750, no need
7281 * to explicitly disable the others.
7282 */
7283 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7284 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7285 i += TG3_BDINFO_SIZE) {
7286 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7287 BDINFO_FLAGS_DISABLED);
7288 }
7289 }
7290
7291 tp->rx_rcb_ptr = 0;
7292 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7293
7294 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7295 tp->rx_rcb_mapping,
7296 (TG3_RX_RCB_RING_SIZE(tp) <<
7297 BDINFO_FLAGS_MAXLEN_SHIFT),
7298 0);
7299
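	/* tg3_init_rings() has already filled the standard (and, when
	 * enabled, jumbo) rings, so publish those producer indices to
	 * the hardware mailboxes.
	 */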
7300 tp->rx_std_ptr = tp->rx_pending;
7301 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7302 tp->rx_std_ptr);
7303
Michael Chan0f893dc2005-07-25 12:30:38 -07007304 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07007305 tp->rx_jumbo_pending : 0;
7306 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7307 tp->rx_jumbo_ptr);
7308
7309 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07007310 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007311
7312 /* MTU + ethernet header + FCS + optional VLAN tag */
7313 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7314
7315 /* The slot time is changed by tg3_setup_phy if we
7316 * run at gigabit with half duplex.
7317 */
7318 tw32(MAC_TX_LENGTHS,
7319 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7320 (6 << TX_LENGTHS_IPG_SHIFT) |
7321 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7322
7323 /* Receive rules. */
7324 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7325 tw32(RCVLPC_CONFIG, 0x0181);
7326
7327 /* Calculate RDMAC_MODE setting early, we need it to determine
7328 * the RCVLPC_STATE_ENABLE mask.
7329 */
7330 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7331 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7332 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7333 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7334 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07007335
Matt Carlson57e69832008-05-25 23:48:31 -07007336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsond30cdd22007-10-07 23:28:35 -07007338 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7339 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7340 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7341
Michael Chan85e94ce2005-04-21 17:05:28 -07007342 /* If statement applies to 5705 and 5750 PCI devices only */
7343 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7344 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7345 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007346 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07007347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007348 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7349 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7350 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7351 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7352 }
7353 }
7354
Michael Chan85e94ce2005-04-21 17:05:28 -07007355 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7356 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7357
Linus Torvalds1da177e2005-04-16 15:20:36 -07007358 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7359 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007360
7361 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07007362 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7363 val = tr32(RCVLPC_STATS_ENABLE);
7364 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7365 tw32(RCVLPC_STATS_ENABLE, val);
7366 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7367 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007368 val = tr32(RCVLPC_STATS_ENABLE);
7369 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7370 tw32(RCVLPC_STATS_ENABLE, val);
7371 } else {
7372 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7373 }
7374 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7375 tw32(SNDDATAI_STATSENAB, 0xffffff);
7376 tw32(SNDDATAI_STATSCTRL,
7377 (SNDDATAI_SCTRL_ENABLE |
7378 SNDDATAI_SCTRL_FASTUPD));
7379
7380 /* Setup host coalescing engine. */
7381 tw32(HOSTCC_MODE, 0);
7382 for (i = 0; i < 2000; i++) {
7383 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7384 break;
7385 udelay(10);
7386 }
7387
Michael Chand244c892005-07-05 14:42:33 -07007388 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007389
7390 /* set status block DMA address */
7391 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7392 ((u64) tp->status_mapping >> 32));
7393 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7394 ((u64) tp->status_mapping & 0xffffffff));
7395
7396 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7397 /* Status/statistics block address. See tg3_timer,
7398 * the tg3_periodic_fetch_stats call there, and
7399 * tg3_get_stats to see how this works for 5705/5750 chips.
7400 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007401 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7402 ((u64) tp->stats_mapping >> 32));
7403 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7404 ((u64) tp->stats_mapping & 0xffffffff));
7405 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7406 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7407 }
7408
7409 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7410
7411 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7412 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7413 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7414 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7415
7416 /* Clear statistics/status block in chip, and status block in ram. */
7417 for (i = NIC_SRAM_STATS_BLK;
7418 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7419 i += sizeof(u32)) {
7420 tg3_write_mem(tp, i, 0);
7421 udelay(40);
7422 }
7423 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7424
Michael Chanc94e3942005-09-27 12:12:42 -07007425 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7426 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7427 /* reset to prevent losing 1st rx packet intermittently */
7428 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7429 udelay(10);
7430 }
7431
Matt Carlson3bda1252008-08-15 14:08:22 -07007432 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7433 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7434 else
7435 tp->mac_mode = 0;
7436 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
Linus Torvalds1da177e2005-04-16 15:20:36 -07007437 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007438 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7439 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7440 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7441 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007442 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7443 udelay(40);
7444
Michael Chan314fba32005-04-21 17:07:04 -07007445 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007446 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007447 * register to preserve the GPIO settings for LOMs. The GPIOs,
7448 * whether used as inputs or outputs, are set by boot code after
7449 * reset.
7450 */
Michael Chan9d26e212006-12-07 00:21:14 -08007451 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007452 u32 gpio_mask;
7453
Michael Chan9d26e212006-12-07 00:21:14 -08007454 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7455 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7456 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007457
7458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7459 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7460 GRC_LCLCTRL_GPIO_OUTPUT3;
7461
Michael Chanaf36e6b2006-03-23 01:28:06 -08007462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7463 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7464
Gary Zambranoaaf84462007-05-05 11:51:45 -07007465 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007466 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7467
7468 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007469 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7470 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7471 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007472 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007473 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7474 udelay(100);
7475
Michael Chan09ee9292005-08-09 20:17:00 -07007476 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007477 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007478
7479 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7480 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7481 udelay(40);
7482 }
7483
7484 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7485 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7486 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7487 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7488 WDMAC_MODE_LNGREAD_ENAB);
7489
Michael Chan85e94ce2005-04-21 17:05:28 -07007490 /* If statement applies to 5705 and 5750 PCI devices only */
7491 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7492 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007494		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7495 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7496 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7497 /* nothing */
7498 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7499 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7500 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7501 val |= WDMAC_MODE_RX_ACCEL;
7502 }
7503 }
7504
Michael Chand9ab5ad2006-03-20 22:27:35 -08007505 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007506 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007507 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007508 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
Matt Carlson57e69832008-05-25 23:48:31 -07007509 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7510 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
Matt Carlsonf51f3562008-05-25 23:45:08 -07007511 val |= WDMAC_MODE_STATUS_TAG_FIX;
Michael Chand9ab5ad2006-03-20 22:27:35 -08007512
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513 tw32_f(WDMAC_MODE, val);
7514 udelay(40);
7515
Matt Carlson9974a352007-10-07 23:27:28 -07007516 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
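		/* In PCI-X mode, raise the maximum memory read byte count to
		 * 2K; on 5704 the maximum outstanding split transaction count
		 * is also dropped to its minimum, apparently for reliable DMA
		 * on that chip.
		 */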
7517 u16 pcix_cmd;
7518
7519 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7520 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007522 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7523 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007525 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7526 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527 }
Matt Carlson9974a352007-10-07 23:27:28 -07007528 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7529 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007530 }
7531
7532 tw32_f(RDMAC_MODE, rdmac_mode);
7533 udelay(40);
7534
7535 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7536 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7537 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007538
7539 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7540 tw32(SNDDATAC_MODE,
7541 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7542 else
7543 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7544
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7546 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7547 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7548 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007549 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7550 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007551 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7552 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7553
7554 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7555 err = tg3_load_5701_a0_firmware_fix(tp);
7556 if (err)
7557 return err;
7558 }
7559
Linus Torvalds1da177e2005-04-16 15:20:36 -07007560 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7561 err = tg3_load_tso_firmware(tp);
7562 if (err)
7563 return err;
7564 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007565
7566 tp->tx_mode = TX_MODE_ENABLE;
7567 tw32_f(MAC_TX_MODE, tp->tx_mode);
7568 udelay(100);
7569
7570 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlson57e69832008-05-25 23:48:31 -07007572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7573 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007575 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7576
Linus Torvalds1da177e2005-04-16 15:20:36 -07007577 tw32_f(MAC_RX_MODE, tp->rx_mode);
7578 udelay(10);
7579
Linus Torvalds1da177e2005-04-16 15:20:36 -07007580 tw32(MAC_LED_CTRL, tp->led_ctrl);
7581
7582 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007583 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007584 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7585 udelay(10);
7586 }
7587 tw32_f(MAC_RX_MODE, tp->rx_mode);
7588 udelay(10);
7589
7590 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7591 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7592 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7593 /* Set drive transmission level to 1.2V */
7594 /* only if the signal pre-emphasis bit is not set */
7595 val = tr32(MAC_SERDES_CFG);
7596 val &= 0xfffff000;
7597 val |= 0x880;
7598 tw32(MAC_SERDES_CFG, val);
7599 }
7600 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7601 tw32(MAC_SERDES_CFG, 0x616000);
7602 }
7603
7604 /* Prevent chip from dropping frames when flow control
7605 * is enabled.
7606 */
7607 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7608
7609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7610 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7611 /* Use hardware link auto-negotiation */
7612 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7613 }
7614
Michael Chand4d2c552006-03-20 17:47:20 -08007615 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7616 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7617 u32 tmp;
7618
7619 tmp = tr32(SERDES_RX_CTRL);
7620 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7621 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7622 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7623 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7624 }
7625
Matt Carlsondd477002008-05-25 23:45:58 -07007626 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7627 if (tp->link_config.phy_is_low_power) {
7628 tp->link_config.phy_is_low_power = 0;
7629 tp->link_config.speed = tp->link_config.orig_speed;
7630 tp->link_config.duplex = tp->link_config.orig_duplex;
7631 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7632 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007633
Matt Carlsondd477002008-05-25 23:45:58 -07007634 err = tg3_setup_phy(tp, 0);
7635 if (err)
7636 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007637
Matt Carlsondd477002008-05-25 23:45:58 -07007638 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7639 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7640 u32 tmp;
7641
7642 /* Clear CRC stats. */
7643 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7644 tg3_writephy(tp, MII_TG3_TEST1,
7645 tmp | MII_TG3_TEST1_CRC_EN);
7646 tg3_readphy(tp, 0x14, &tmp);
7647 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007648 }
7649 }
7650
7651 __tg3_set_rx_mode(tp->dev);
7652
7653 /* Initialize receive rules. */
7654 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7655 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7656 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7657 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7658
Michael Chan4cf78e42005-07-25 12:29:19 -07007659 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007660 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007661 limit = 8;
7662 else
7663 limit = 16;
7664 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7665 limit -= 4;
7666 switch (limit) {
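	/* Each case falls through so that every receive rule slot above
	 * the usable limit is cleared; slots 3 and 2 are deliberately
	 * left untouched (see the commented-out writes below).
	 */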
7667 case 16:
7668 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7669 case 15:
7670 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7671 case 14:
7672 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7673 case 13:
7674 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7675 case 12:
7676 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7677 case 11:
7678 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7679 case 10:
7680 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7681 case 9:
7682 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7683 case 8:
7684 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7685 case 7:
7686 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7687 case 6:
7688 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7689 case 5:
7690 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7691 case 4:
7692 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7693 case 3:
7694 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7695 case 2:
7696 case 1:
7697
7698 default:
7699 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -07007700 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007701
Matt Carlson9ce768e2007-10-11 19:49:11 -07007702 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7703 /* Write our heartbeat update interval to APE. */
7704 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7705 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007706
Linus Torvalds1da177e2005-04-16 15:20:36 -07007707 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7708
Linus Torvalds1da177e2005-04-16 15:20:36 -07007709 return 0;
7710}
7711
7712/* Called at device open time to get the chip ready for
7713 * packet processing. Invoked with tp->lock held.
7714 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007715static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007717 tg3_switch_clocks(tp);
7718
7719 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7720
Matt Carlson2f751b62008-08-04 23:17:34 -07007721 return tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007722}
7723
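/* Fold a 32-bit hardware counter into a 64-bit software counter.  The
 * low word is summed modulo 2^32; if the sum wraps (ends up smaller
 * than the value just added) a carry is propagated into the high word.
 */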
7724#define TG3_STAT_ADD32(PSTAT, REG) \
7725do { u32 __val = tr32(REG); \
7726 (PSTAT)->low += __val; \
7727 if ((PSTAT)->low < __val) \
7728 (PSTAT)->high += 1; \
7729} while (0)
7730
7731static void tg3_periodic_fetch_stats(struct tg3 *tp)
7732{
7733 struct tg3_hw_stats *sp = tp->hw_stats;
7734
7735 if (!netif_carrier_ok(tp->dev))
7736 return;
7737
7738 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7739 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7740 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7741 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7742 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7743 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7744 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7745 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7746 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7747 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7748 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7749 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7750 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7751
7752 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7753 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7754 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7755 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7756 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7757 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7758 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7759 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7760 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7761 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7762 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7763 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7764 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7765 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007766
7767 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7768 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7769 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007770}
7771
7772static void tg3_timer(unsigned long __opaque)
7773{
7774 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007775
Michael Chanf475f162006-03-27 23:20:14 -08007776 if (tp->irq_sync)
7777 goto restart_timer;
7778
David S. Millerf47c11e2005-06-24 20:18:35 -07007779 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007780
David S. Millerfac9b832005-05-18 22:46:34 -07007781 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7782 /* All of this garbage is because when using non-tagged
7783 * IRQ status the mailbox/status_block protocol the chip
7784 * uses with the cpu is race prone.
7785 */
7786 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7787 tw32(GRC_LOCAL_CTRL,
7788 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7789 } else {
7790 tw32(HOSTCC_MODE, tp->coalesce_mode |
7791 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7792 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007793
David S. Millerfac9b832005-05-18 22:46:34 -07007794 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7795 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007796 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007797 schedule_work(&tp->reset_task);
7798 return;
7799 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007800 }
7801
Linus Torvalds1da177e2005-04-16 15:20:36 -07007802 /* This part only runs once per second. */
7803 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007804 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7805 tg3_periodic_fetch_stats(tp);
7806
Linus Torvalds1da177e2005-04-16 15:20:36 -07007807 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7808 u32 mac_stat;
7809 int phy_event;
7810
7811 mac_stat = tr32(MAC_STATUS);
7812
7813 phy_event = 0;
7814 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7815 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7816 phy_event = 1;
7817 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7818 phy_event = 1;
7819
7820 if (phy_event)
7821 tg3_setup_phy(tp, 0);
7822 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7823 u32 mac_stat = tr32(MAC_STATUS);
7824 int need_setup = 0;
7825
7826 if (netif_carrier_ok(tp->dev) &&
7827 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7828 need_setup = 1;
7829 }
7830 if (! netif_carrier_ok(tp->dev) &&
7831 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7832 MAC_STATUS_SIGNAL_DET))) {
7833 need_setup = 1;
7834 }
7835 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007836 if (!tp->serdes_counter) {
7837 tw32_f(MAC_MODE,
7838 (tp->mac_mode &
7839 ~MAC_MODE_PORT_MODE_MASK));
7840 udelay(40);
7841 tw32_f(MAC_MODE, tp->mac_mode);
7842 udelay(40);
7843 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007844 tg3_setup_phy(tp, 0);
7845 }
Michael Chan747e8f82005-07-25 12:33:22 -07007846 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7847 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007848
7849 tp->timer_counter = tp->timer_multiplier;
7850 }
7851
Michael Chan130b8e42006-09-27 16:00:40 -07007852 /* Heartbeat is only sent once every 2 seconds.
7853 *
7854 * The heartbeat is to tell the ASF firmware that the host
7855 * driver is still alive. In the event that the OS crashes,
7856 * ASF needs to reset the hardware to free up the FIFO space
7857 * that may be filled with rx packets destined for the host.
7858 * If the FIFO is full, ASF will no longer function properly.
7859 *
7860	 * Unintended resets have been reported on real-time kernels
7861	 * where the timer doesn't run on time. Netpoll will also have
7862	 * the same problem.
7863 *
7864 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7865 * to check the ring condition when the heartbeat is expiring
7866 * before doing the reset. This will prevent most unintended
7867 * resets.
7868 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007869 if (!--tp->asf_counter) {
Matt Carlsonbc7959b2008-08-15 14:08:55 -07007870 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7871 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Matt Carlson7c5026a2008-05-02 16:49:29 -07007872 tg3_wait_for_event_ack(tp);
7873
Michael Chanbbadf502006-04-06 21:46:34 -07007874 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007875 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007876 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007877			/* 5 second timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007878 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Matt Carlson4ba526c2008-08-15 14:10:04 -07007879
7880 tg3_generate_fw_event(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007881 }
7882 tp->asf_counter = tp->asf_multiplier;
7883 }
7884
David S. Millerf47c11e2005-06-24 20:18:35 -07007885 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007886
Michael Chanf475f162006-03-27 23:20:14 -08007887restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007888 tp->timer.expires = jiffies + tp->timer_offset;
7889 add_timer(&tp->timer);
7890}
7891
Adrian Bunk81789ef2006-03-20 23:00:14 -08007892static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007893{
David Howells7d12e782006-10-05 14:55:46 +01007894 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007895 unsigned long flags;
7896 struct net_device *dev = tp->dev;
7897
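	/* MSI gives the device an exclusive vector, so IRQF_SHARED is only
	 * needed for legacy INTx; chips with tagged status get the tagged
	 * handler, which uses the status block tag when acking interrupts.
	 */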
7898 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7899 fn = tg3_msi;
7900 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7901 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007902 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007903 } else {
7904 fn = tg3_interrupt;
7905 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7906 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007907 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007908 }
7909 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7910}
7911
Michael Chan79381092005-04-21 17:13:59 -07007912static int tg3_test_interrupt(struct tg3 *tp)
7913{
7914 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07007915 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07007916
Michael Chand4bc3922005-05-29 14:59:20 -07007917 if (!netif_running(dev))
7918 return -ENODEV;
7919
Michael Chan79381092005-04-21 17:13:59 -07007920 tg3_disable_ints(tp);
7921
7922 free_irq(tp->pdev->irq, dev);
7923
7924 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007925 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07007926 if (err)
7927 return err;
7928
Michael Chan38f38432005-09-05 17:53:32 -07007929 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07007930 tg3_enable_ints(tp);
7931
7932 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7933 HOSTCC_MODE_NOW);
7934
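	/* Poll for up to ~50ms for evidence that the test ISR ran: on a
	 * successful delivery it should disable interrupts, which leaves a
	 * non-zero value in the interrupt mailbox and sets the PCI
	 * interrupt mask bit in MISC_HOST_CTRL.
	 */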
7935 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07007936 u32 int_mbox, misc_host_ctrl;
7937
Michael Chan09ee9292005-08-09 20:17:00 -07007938 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7939 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07007940 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7941
7942 if ((int_mbox != 0) ||
7943 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7944 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07007945 break;
Michael Chanb16250e2006-09-27 16:10:14 -07007946 }
7947
Michael Chan79381092005-04-21 17:13:59 -07007948 msleep(10);
7949 }
7950
7951 tg3_disable_ints(tp);
7952
7953 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04007954
Michael Chanfcfa0a32006-03-20 22:28:41 -08007955 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007956
7957 if (err)
7958 return err;
7959
Michael Chanb16250e2006-09-27 16:10:14 -07007960 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07007961 return 0;
7962
7963 return -EIO;
7964}
7965
7966/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
7967 * mode is successfully restored.
7968 */
7969static int tg3_test_msi(struct tg3 *tp)
7970{
7971 struct net_device *dev = tp->dev;
7972 int err;
7973 u16 pci_cmd;
7974
7975 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7976 return 0;
7977
7978 /* Turn off SERR reporting in case MSI terminates with Master
7979 * Abort.
7980 */
7981 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7982 pci_write_config_word(tp->pdev, PCI_COMMAND,
7983 pci_cmd & ~PCI_COMMAND_SERR);
7984
7985 err = tg3_test_interrupt(tp);
7986
7987 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7988
7989 if (!err)
7990 return 0;
7991
7992 /* other failures */
7993 if (err != -EIO)
7994 return err;
7995
7996 /* MSI test failed, go back to INTx mode */
7997 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7998 "switching to INTx mode. Please report this failure to "
7999 "the PCI maintainer and include system chipset information.\n",
8000 tp->dev->name);
8001
8002 free_irq(tp->pdev->irq, dev);
8003 pci_disable_msi(tp->pdev);
8004
8005 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8006
Michael Chanfcfa0a32006-03-20 22:28:41 -08008007 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07008008 if (err)
8009 return err;
8010
8011 /* Need to reset the chip because the MSI cycle may have terminated
8012 * with Master Abort.
8013 */
David S. Millerf47c11e2005-06-24 20:18:35 -07008014 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008015
Michael Chan944d9802005-05-29 14:57:48 -07008016 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008017 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07008018
David S. Millerf47c11e2005-06-24 20:18:35 -07008019 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008020
8021 if (err)
8022 free_irq(tp->pdev->irq, dev);
8023
8024 return err;
8025}
8026
Linus Torvalds1da177e2005-04-16 15:20:36 -07008027static int tg3_open(struct net_device *dev)
8028{
8029 struct tg3 *tp = netdev_priv(dev);
8030 int err;
8031
Michael Chanc49a1562006-12-17 17:07:29 -08008032 netif_carrier_off(tp->dev);
8033
Michael Chanbc1c7562006-03-20 17:48:03 -08008034 err = tg3_set_power_state(tp, PCI_D0);
Matt Carlson2f751b62008-08-04 23:17:34 -07008035 if (err)
Michael Chanbc1c7562006-03-20 17:48:03 -08008036 return err;
Matt Carlson2f751b62008-08-04 23:17:34 -07008037
8038 tg3_full_lock(tp, 0);
Michael Chanbc1c7562006-03-20 17:48:03 -08008039
Linus Torvalds1da177e2005-04-16 15:20:36 -07008040 tg3_disable_ints(tp);
8041 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8042
David S. Millerf47c11e2005-06-24 20:18:35 -07008043 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008044
8045 /* The placement of this call is tied
8046 * to the setup and use of Host TX descriptors.
8047 */
8048 err = tg3_alloc_consistent(tp);
8049 if (err)
8050 return err;
8051
Michael Chan7544b092007-05-05 13:08:32 -07008052 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07008053		/* All MSI-supporting chips should support tagged
8054 * status. Assert that this is the case.
8055 */
8056 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8057 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8058 "Not using MSI.\n", tp->dev->name);
8059 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc22005-04-21 17:13:25 -07008060 u32 msi_mode;
8061
8062 msi_mode = tr32(MSGINT_MODE);
8063 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8064 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8065 }
8066 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008067 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008068
8069 if (err) {
Michael Chan88b06bc22005-04-21 17:13:25 -07008070 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8071 pci_disable_msi(tp->pdev);
8072 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8073 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008074 tg3_free_consistent(tp);
8075 return err;
8076 }
8077
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008078 napi_enable(&tp->napi);
8079
David S. Millerf47c11e2005-06-24 20:18:35 -07008080 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008081
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07008082 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008083 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07008084 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008085 tg3_free_rings(tp);
8086 } else {
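		/* With tagged status the race-avoidance polling in tg3_timer
		 * is not needed, so a 1 second period suffices; otherwise the
		 * timer runs at 10Hz.  The counters below keep the
		 * once-per-second work and the 2 second ASF heartbeat on
		 * schedule regardless of the period chosen.
		 */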
David S. Millerfac9b832005-05-18 22:46:34 -07008087 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8088 tp->timer_offset = HZ;
8089 else
8090 tp->timer_offset = HZ / 10;
8091
8092 BUG_ON(tp->timer_offset > HZ);
8093 tp->timer_counter = tp->timer_multiplier =
8094 (HZ / tp->timer_offset);
8095 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07008096 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008097
8098 init_timer(&tp->timer);
8099 tp->timer.expires = jiffies + tp->timer_offset;
8100 tp->timer.data = (unsigned long) tp;
8101 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008102 }
8103
David S. Millerf47c11e2005-06-24 20:18:35 -07008104 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008105
8106 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008107 napi_disable(&tp->napi);
Michael Chan88b06bc22005-04-21 17:13:25 -07008108 free_irq(tp->pdev->irq, dev);
8109 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8110 pci_disable_msi(tp->pdev);
8111 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8112 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008113 tg3_free_consistent(tp);
8114 return err;
8115 }
8116
Michael Chan79381092005-04-21 17:13:59 -07008117 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8118 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07008119
Michael Chan79381092005-04-21 17:13:59 -07008120 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07008121 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07008122
8123 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8124 pci_disable_msi(tp->pdev);
8125 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8126 }
Michael Chan944d9802005-05-29 14:57:48 -07008127 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07008128 tg3_free_rings(tp);
8129 tg3_free_consistent(tp);
8130
David S. Millerf47c11e2005-06-24 20:18:35 -07008131 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07008132
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008133 napi_disable(&tp->napi);
8134
Michael Chan79381092005-04-21 17:13:59 -07008135 return err;
8136 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08008137
8138 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8139 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07008140 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008141
Michael Chanb5d37722006-09-27 16:06:21 -07008142 tw32(PCIE_TRANSACTION_CFG,
8143 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08008144 }
8145 }
Michael Chan79381092005-04-21 17:13:59 -07008146 }
8147
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008148 tg3_phy_start(tp);
8149
David S. Millerf47c11e2005-06-24 20:18:35 -07008150 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008151
Michael Chan79381092005-04-21 17:13:59 -07008152 add_timer(&tp->timer);
8153 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008154 tg3_enable_ints(tp);
8155
David S. Millerf47c11e2005-06-24 20:18:35 -07008156 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008157
8158 netif_start_queue(dev);
8159
8160 return 0;
8161}
8162
8163#if 0
8164/*static*/ void tg3_dump_state(struct tg3 *tp)
8165{
8166 u32 val32, val32_2, val32_3, val32_4, val32_5;
8167 u16 val16;
8168 int i;
8169
8170 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8171 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8172 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8173 val16, val32);
8174
8175 /* MAC block */
8176 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8177 tr32(MAC_MODE), tr32(MAC_STATUS));
8178 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8179 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8180 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8181 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8182 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8183 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8184
8185 /* Send data initiator control block */
8186 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8187 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8188 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8189 tr32(SNDDATAI_STATSCTRL));
8190
8191 /* Send data completion control block */
8192 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8193
8194 /* Send BD ring selector block */
8195 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8196 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8197
8198 /* Send BD initiator control block */
8199 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8200 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8201
8202 /* Send BD completion control block */
8203 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8204
8205 /* Receive list placement control block */
8206 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8207 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8208 printk(" RCVLPC_STATSCTRL[%08x]\n",
8209 tr32(RCVLPC_STATSCTRL));
8210
8211 /* Receive data and receive BD initiator control block */
8212 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8213 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8214
8215 /* Receive data completion control block */
8216 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8217 tr32(RCVDCC_MODE));
8218
8219 /* Receive BD initiator control block */
8220 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8221 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8222
8223 /* Receive BD completion control block */
8224 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8225 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8226
8227 /* Receive list selector control block */
8228 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8229 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8230
8231 /* Mbuf cluster free block */
8232 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8233 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8234
8235 /* Host coalescing control block */
8236 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8237 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8238 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8239 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8240 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8241 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8242 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8243 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8244 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8245 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8246 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8247 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8248
8249 /* Memory arbiter control block */
8250 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8251 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8252
8253 /* Buffer manager control block */
8254 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8255 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8256 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8257 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8258 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8259 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8260 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8261 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8262
8263 /* Read DMA control block */
8264 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8265 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8266
8267 /* Write DMA control block */
8268 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8269 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8270
8271 /* DMA completion block */
8272 printk("DEBUG: DMAC_MODE[%08x]\n",
8273 tr32(DMAC_MODE));
8274
8275 /* GRC block */
8276 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8277 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8278 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8279 tr32(GRC_LOCAL_CTRL));
8280
8281 /* TG3_BDINFOs */
8282 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8283 tr32(RCVDBDI_JUMBO_BD + 0x0),
8284 tr32(RCVDBDI_JUMBO_BD + 0x4),
8285 tr32(RCVDBDI_JUMBO_BD + 0x8),
8286 tr32(RCVDBDI_JUMBO_BD + 0xc));
8287 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8288 tr32(RCVDBDI_STD_BD + 0x0),
8289 tr32(RCVDBDI_STD_BD + 0x4),
8290 tr32(RCVDBDI_STD_BD + 0x8),
8291 tr32(RCVDBDI_STD_BD + 0xc));
8292 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8293 tr32(RCVDBDI_MINI_BD + 0x0),
8294 tr32(RCVDBDI_MINI_BD + 0x4),
8295 tr32(RCVDBDI_MINI_BD + 0x8),
8296 tr32(RCVDBDI_MINI_BD + 0xc));
8297
8298 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8299 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8300 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8301 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8302 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8303 val32, val32_2, val32_3, val32_4);
8304
8305 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8306 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8307 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8308 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8309 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8310 val32, val32_2, val32_3, val32_4);
8311
8312 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8313 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8314 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8315 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8316 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8317 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8318 val32, val32_2, val32_3, val32_4, val32_5);
8319
8320 /* SW status block */
8321 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8322 tp->hw_status->status,
8323 tp->hw_status->status_tag,
8324 tp->hw_status->rx_jumbo_consumer,
8325 tp->hw_status->rx_consumer,
8326 tp->hw_status->rx_mini_consumer,
8327 tp->hw_status->idx[0].rx_producer,
8328 tp->hw_status->idx[0].tx_consumer);
8329
8330 /* SW statistics block */
8331 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8332 ((u32 *)tp->hw_stats)[0],
8333 ((u32 *)tp->hw_stats)[1],
8334 ((u32 *)tp->hw_stats)[2],
8335 ((u32 *)tp->hw_stats)[3]);
8336
8337 /* Mailboxes */
8338 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07008339 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8340 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8341 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8342 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008343
8344 /* NIC side send descriptors. */
8345 for (i = 0; i < 6; i++) {
8346 unsigned long txd;
8347
8348 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8349 + (i * sizeof(struct tg3_tx_buffer_desc));
8350 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8351 i,
8352 readl(txd + 0x0), readl(txd + 0x4),
8353 readl(txd + 0x8), readl(txd + 0xc));
8354 }
8355
8356 /* NIC side RX descriptors. */
8357 for (i = 0; i < 6; i++) {
8358 unsigned long rxd;
8359
8360 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8361 + (i * sizeof(struct tg3_rx_buffer_desc));
8362 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8363 i,
8364 readl(rxd + 0x0), readl(rxd + 0x4),
8365 readl(rxd + 0x8), readl(rxd + 0xc));
8366 rxd += (4 * sizeof(u32));
8367 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8368 i,
8369 readl(rxd + 0x0), readl(rxd + 0x4),
8370 readl(rxd + 0x8), readl(rxd + 0xc));
8371 }
8372
8373 for (i = 0; i < 6; i++) {
8374 unsigned long rxd;
8375
8376 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8377 + (i * sizeof(struct tg3_rx_buffer_desc));
8378 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8379 i,
8380 readl(rxd + 0x0), readl(rxd + 0x4),
8381 readl(rxd + 0x8), readl(rxd + 0xc));
8382 rxd += (4 * sizeof(u32));
8383 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8384 i,
8385 readl(rxd + 0x0), readl(rxd + 0x4),
8386 readl(rxd + 0x8), readl(rxd + 0xc));
8387 }
8388}
8389#endif
8390
8391static struct net_device_stats *tg3_get_stats(struct net_device *);
8392static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8393
8394static int tg3_close(struct net_device *dev)
8395{
8396 struct tg3 *tp = netdev_priv(dev);
8397
Stephen Hemmingerbea33482007-10-03 16:41:36 -07008398 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07008399 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08008400
Linus Torvalds1da177e2005-04-16 15:20:36 -07008401 netif_stop_queue(dev);
8402
8403 del_timer_sync(&tp->timer);
8404
David S. Millerf47c11e2005-06-24 20:18:35 -07008405 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008406#if 0
8407 tg3_dump_state(tp);
8408#endif
8409
8410 tg3_disable_ints(tp);
8411
Michael Chan944d9802005-05-29 14:57:48 -07008412 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008413 tg3_free_rings(tp);
Michael Chan5cf64b8a2007-05-05 12:11:21 -07008414 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008415
David S. Millerf47c11e2005-06-24 20:18:35 -07008416 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008417
Michael Chan88b06bc22005-04-21 17:13:25 -07008418 free_irq(tp->pdev->irq, dev);
8419 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8420 pci_disable_msi(tp->pdev);
8421 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8422 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008423
8424 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8425 sizeof(tp->net_stats_prev));
8426 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8427 sizeof(tp->estats_prev));
8428
8429 tg3_free_consistent(tp);
8430
Michael Chanbc1c7562006-03-20 17:48:03 -08008431 tg3_set_power_state(tp, PCI_D3hot);
8432
8433 netif_carrier_off(tp->dev);
8434
Linus Torvalds1da177e2005-04-16 15:20:36 -07008435 return 0;
8436}
8437
8438static inline unsigned long get_stat64(tg3_stat64_t *val)
8439{
8440 unsigned long ret;
8441
8442#if (BITS_PER_LONG == 32)
8443 ret = val->low;
8444#else
8445 ret = ((u64)val->high << 32) | ((u64)val->low);
8446#endif
8447 return ret;
8448}
8449
Stefan Buehler816f8b82008-08-15 14:10:54 -07008450static inline u64 get_estat64(tg3_stat64_t *val)
8451{
8452 return ((u64)val->high << 32) | ((u64)val->low);
8453}
8454
Linus Torvalds1da177e2005-04-16 15:20:36 -07008455static unsigned long calc_crc_errors(struct tg3 *tp)
8456{
8457 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8458
8459 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8460 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008462 u32 val;
8463
David S. Millerf47c11e2005-06-24 20:18:35 -07008464 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008465 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8466 tg3_writephy(tp, MII_TG3_TEST1,
8467 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008468 tg3_readphy(tp, 0x14, &val);
8469 } else
8470 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008471 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008472
8473 tp->phy_crc_errors += val;
8474
8475 return tp->phy_crc_errors;
8476 }
8477
8478 return get_stat64(&hw_stats->rx_fcs_errors);
8479}
8480
8481#define ESTAT_ADD(member) \
8482 estats->member = old_estats->member + \
Stefan Buehler816f8b82008-08-15 14:10:54 -07008483 get_estat64(&hw_stats->member)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484
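/* tg3_get_estats() folds the hardware counters into running totals:
 * estats_prev is snapshotted in tg3_close(), so each ESTAT_ADD() entry
 * is "total at the last close" plus "count since the current open",
 * and statistics are not lost across an interface down/up cycle.
 */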
8485static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8486{
8487 struct tg3_ethtool_stats *estats = &tp->estats;
8488 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8489 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8490
8491 if (!hw_stats)
8492 return old_estats;
8493
8494 ESTAT_ADD(rx_octets);
8495 ESTAT_ADD(rx_fragments);
8496 ESTAT_ADD(rx_ucast_packets);
8497 ESTAT_ADD(rx_mcast_packets);
8498 ESTAT_ADD(rx_bcast_packets);
8499 ESTAT_ADD(rx_fcs_errors);
8500 ESTAT_ADD(rx_align_errors);
8501 ESTAT_ADD(rx_xon_pause_rcvd);
8502 ESTAT_ADD(rx_xoff_pause_rcvd);
8503 ESTAT_ADD(rx_mac_ctrl_rcvd);
8504 ESTAT_ADD(rx_xoff_entered);
8505 ESTAT_ADD(rx_frame_too_long_errors);
8506 ESTAT_ADD(rx_jabbers);
8507 ESTAT_ADD(rx_undersize_packets);
8508 ESTAT_ADD(rx_in_length_errors);
8509 ESTAT_ADD(rx_out_length_errors);
8510 ESTAT_ADD(rx_64_or_less_octet_packets);
8511 ESTAT_ADD(rx_65_to_127_octet_packets);
8512 ESTAT_ADD(rx_128_to_255_octet_packets);
8513 ESTAT_ADD(rx_256_to_511_octet_packets);
8514 ESTAT_ADD(rx_512_to_1023_octet_packets);
8515 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8516 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8517 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8518 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8519 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8520
8521 ESTAT_ADD(tx_octets);
8522 ESTAT_ADD(tx_collisions);
8523 ESTAT_ADD(tx_xon_sent);
8524 ESTAT_ADD(tx_xoff_sent);
8525 ESTAT_ADD(tx_flow_control);
8526 ESTAT_ADD(tx_mac_errors);
8527 ESTAT_ADD(tx_single_collisions);
8528 ESTAT_ADD(tx_mult_collisions);
8529 ESTAT_ADD(tx_deferred);
8530 ESTAT_ADD(tx_excessive_collisions);
8531 ESTAT_ADD(tx_late_collisions);
8532 ESTAT_ADD(tx_collide_2times);
8533 ESTAT_ADD(tx_collide_3times);
8534 ESTAT_ADD(tx_collide_4times);
8535 ESTAT_ADD(tx_collide_5times);
8536 ESTAT_ADD(tx_collide_6times);
8537 ESTAT_ADD(tx_collide_7times);
8538 ESTAT_ADD(tx_collide_8times);
8539 ESTAT_ADD(tx_collide_9times);
8540 ESTAT_ADD(tx_collide_10times);
8541 ESTAT_ADD(tx_collide_11times);
8542 ESTAT_ADD(tx_collide_12times);
8543 ESTAT_ADD(tx_collide_13times);
8544 ESTAT_ADD(tx_collide_14times);
8545 ESTAT_ADD(tx_collide_15times);
8546 ESTAT_ADD(tx_ucast_packets);
8547 ESTAT_ADD(tx_mcast_packets);
8548 ESTAT_ADD(tx_bcast_packets);
8549 ESTAT_ADD(tx_carrier_sense_errors);
8550 ESTAT_ADD(tx_discards);
8551 ESTAT_ADD(tx_errors);
8552
8553 ESTAT_ADD(dma_writeq_full);
8554 ESTAT_ADD(dma_write_prioq_full);
8555 ESTAT_ADD(rxbds_empty);
8556 ESTAT_ADD(rx_discards);
8557 ESTAT_ADD(rx_errors);
8558 ESTAT_ADD(rx_threshold_hit);
8559
8560 ESTAT_ADD(dma_readq_full);
8561 ESTAT_ADD(dma_read_prioq_full);
8562 ESTAT_ADD(tx_comp_queue_full);
8563
8564 ESTAT_ADD(ring_set_send_prod_index);
8565 ESTAT_ADD(ring_status_update);
8566 ESTAT_ADD(nic_irqs);
8567 ESTAT_ADD(nic_avoided_irqs);
8568 ESTAT_ADD(nic_tx_threshold_hit);
8569
8570 return estats;
8571}
8572
8573static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8574{
8575 struct tg3 *tp = netdev_priv(dev);
8576 struct net_device_stats *stats = &tp->net_stats;
8577 struct net_device_stats *old_stats = &tp->net_stats_prev;
8578 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8579
8580 if (!hw_stats)
8581 return old_stats;
8582
8583 stats->rx_packets = old_stats->rx_packets +
8584 get_stat64(&hw_stats->rx_ucast_packets) +
8585 get_stat64(&hw_stats->rx_mcast_packets) +
8586 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008587
Linus Torvalds1da177e2005-04-16 15:20:36 -07008588 stats->tx_packets = old_stats->tx_packets +
8589 get_stat64(&hw_stats->tx_ucast_packets) +
8590 get_stat64(&hw_stats->tx_mcast_packets) +
8591 get_stat64(&hw_stats->tx_bcast_packets);
8592
8593 stats->rx_bytes = old_stats->rx_bytes +
8594 get_stat64(&hw_stats->rx_octets);
8595 stats->tx_bytes = old_stats->tx_bytes +
8596 get_stat64(&hw_stats->tx_octets);
8597
8598 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008599 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008600 stats->tx_errors = old_stats->tx_errors +
8601 get_stat64(&hw_stats->tx_errors) +
8602 get_stat64(&hw_stats->tx_mac_errors) +
8603 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8604 get_stat64(&hw_stats->tx_discards);
8605
8606 stats->multicast = old_stats->multicast +
8607 get_stat64(&hw_stats->rx_mcast_packets);
8608 stats->collisions = old_stats->collisions +
8609 get_stat64(&hw_stats->tx_collisions);
8610
8611 stats->rx_length_errors = old_stats->rx_length_errors +
8612 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8613 get_stat64(&hw_stats->rx_undersize_packets);
8614
8615 stats->rx_over_errors = old_stats->rx_over_errors +
8616 get_stat64(&hw_stats->rxbds_empty);
8617 stats->rx_frame_errors = old_stats->rx_frame_errors +
8618 get_stat64(&hw_stats->rx_align_errors);
8619 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8620 get_stat64(&hw_stats->tx_discards);
8621 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8622 get_stat64(&hw_stats->tx_carrier_sense_errors);
8623
8624 stats->rx_crc_errors = old_stats->rx_crc_errors +
8625 calc_crc_errors(tp);
8626
John W. Linville4f63b872005-09-12 14:43:18 -07008627 stats->rx_missed_errors = old_stats->rx_missed_errors +
8628 get_stat64(&hw_stats->rx_discards);
8629
Linus Torvalds1da177e2005-04-16 15:20:36 -07008630 return stats;
8631}
8632
8633static inline u32 calc_crc(unsigned char *buf, int len)
8634{
8635 u32 reg;
8636 u32 tmp;
8637 int j, k;
8638
8639 reg = 0xffffffff;
8640
8641 for (j = 0; j < len; j++) {
8642 reg ^= buf[j];
8643
8644 for (k = 0; k < 8; k++) {
8645 tmp = reg & 0x01;
8646
8647 reg >>= 1;
8648
8649 if (tmp) {
8650 reg ^= 0xedb88320;
8651 }
8652 }
8653 }
8654
8655 return ~reg;
8656}
8657
8658static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8659{
8660 /* accept or reject all multicast frames */
8661 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8662 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8663 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8664 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8665}
8666
8667static void __tg3_set_rx_mode(struct net_device *dev)
8668{
8669 struct tg3 *tp = netdev_priv(dev);
8670 u32 rx_mode;
8671
8672 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8673 RX_MODE_KEEP_VLAN_TAG);
8674
8675 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8676 * flag clear.
8677 */
8678#if TG3_VLAN_TAG_USED
8679 if (!tp->vlgrp &&
8680 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8681 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8682#else
8683	/* By definition, VLAN is always disabled in this
8684 * case.
8685 */
8686 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8687 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8688#endif
8689
8690 if (dev->flags & IFF_PROMISC) {
8691 /* Promiscuous mode. */
8692 rx_mode |= RX_MODE_PROMISC;
8693 } else if (dev->flags & IFF_ALLMULTI) {
8694 /* Accept all multicast. */
8695		tg3_set_multi(tp, 1);
8696 } else if (dev->mc_count < 1) {
8697 /* Reject all multicast. */
8698		tg3_set_multi(tp, 0);
8699 } else {
8700 /* Accept one or more multicast(s). */
8701 struct dev_mc_list *mclist;
8702 unsigned int i;
8703 u32 mc_filter[4] = { 0, };
8704 u32 regidx;
8705 u32 bit;
8706 u32 crc;
8707
8708 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8709 i++, mclist = mclist->next) {
8710
8711 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8712 bit = ~crc & 0x7f;
8713 regidx = (bit & 0x60) >> 5;
8714 bit &= 0x1f;
8715 mc_filter[regidx] |= (1 << bit);
8716 }
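		/* Hash mapping used above: the low 7 bits of the
		 * complemented CRC select one of 128 filter bits;
		 * bits 6:5 pick one of the four 32-bit MAC_HASH_REG_*
		 * registers and bits 4:0 pick the bit within it.
		 */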
8717
8718 tw32(MAC_HASH_REG_0, mc_filter[0]);
8719 tw32(MAC_HASH_REG_1, mc_filter[1]);
8720 tw32(MAC_HASH_REG_2, mc_filter[2]);
8721 tw32(MAC_HASH_REG_3, mc_filter[3]);
8722 }
8723
8724 if (rx_mode != tp->rx_mode) {
8725 tp->rx_mode = rx_mode;
8726 tw32_f(MAC_RX_MODE, rx_mode);
8727 udelay(10);
8728 }
8729}
8730
8731static void tg3_set_rx_mode(struct net_device *dev)
8732{
8733 struct tg3 *tp = netdev_priv(dev);
8734
Michael Chane75f7c92006-03-20 21:33:26 -08008735 if (!netif_running(dev))
8736 return;
8737
David S. Millerf47c11e2005-06-24 20:18:35 -07008738 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008739 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008740 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008741}
8742
8743#define TG3_REGDUMP_LEN (32 * 1024)
8744
8745static int tg3_get_regs_len(struct net_device *dev)
8746{
8747 return TG3_REGDUMP_LEN;
8748}
8749
8750static void tg3_get_regs(struct net_device *dev,
8751 struct ethtool_regs *regs, void *_p)
8752{
8753 u32 *p = _p;
8754 struct tg3 *tp = netdev_priv(dev);
8755 u8 *orig_p = _p;
8756 int i;
8757
8758 regs->version = 0;
8759
8760 memset(p, 0, TG3_REGDUMP_LEN);
8761
Michael Chanbc1c7562006-03-20 17:48:03 -08008762 if (tp->link_config.phy_is_low_power)
8763 return;
8764
David S. Millerf47c11e2005-06-24 20:18:35 -07008765 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008766
8767#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8768#define GET_REG32_LOOP(base,len) \
8769do { p = (u32 *)(orig_p + (base)); \
8770 for (i = 0; i < len; i += 4) \
8771 __GET_REG32((base) + i); \
8772} while (0)
8773#define GET_REG32_1(reg) \
8774do { p = (u32 *)(orig_p + (reg)); \
8775 __GET_REG32((reg)); \
8776} while (0)
8777
8778 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8779 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8780 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8781 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8782 GET_REG32_1(SNDDATAC_MODE);
8783 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8784 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8785 GET_REG32_1(SNDBDC_MODE);
8786 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8787 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8788 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8789 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8790 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8791 GET_REG32_1(RCVDCC_MODE);
8792 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8793 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8794 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8795 GET_REG32_1(MBFREE_MODE);
8796 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8797 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8798 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8799 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8800 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008801 GET_REG32_1(RX_CPU_MODE);
8802 GET_REG32_1(RX_CPU_STATE);
8803 GET_REG32_1(RX_CPU_PGMCTR);
8804 GET_REG32_1(RX_CPU_HWBKPT);
8805 GET_REG32_1(TX_CPU_MODE);
8806 GET_REG32_1(TX_CPU_STATE);
8807 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008808 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8809 GET_REG32_LOOP(FTQ_RESET, 0x120);
8810 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8811 GET_REG32_1(DMAC_MODE);
8812 GET_REG32_LOOP(GRC_MODE, 0x4c);
8813 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8814 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8815
8816#undef __GET_REG32
8817#undef GET_REG32_LOOP
8818#undef GET_REG32_1
8819
David S. Millerf47c11e2005-06-24 20:18:35 -07008820 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008821}
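/* Note: the register dump above is sparse by design.  Every value is
 * stored at its own register offset within the 32 KB buffer (p is
 * re-derived from orig_p + base for each GET_REG32_* block), so any
 * offset that is not dumped simply remains zero from the memset().
 */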
8822
8823static int tg3_get_eeprom_len(struct net_device *dev)
8824{
8825 struct tg3 *tp = netdev_priv(dev);
8826
8827 return tp->nvram_size;
8828}
8829
8830static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008831static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008832static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008833
8834static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8835{
8836 struct tg3 *tp = netdev_priv(dev);
8837 int ret;
8838 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008839 u32 i, offset, len, b_offset, b_count;
8840 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008841
Michael Chanbc1c7562006-03-20 17:48:03 -08008842 if (tp->link_config.phy_is_low_power)
8843 return -EAGAIN;
8844
Linus Torvalds1da177e2005-04-16 15:20:36 -07008845 offset = eeprom->offset;
8846 len = eeprom->len;
8847 eeprom->len = 0;
8848
8849 eeprom->magic = TG3_EEPROM_MAGIC;
8850
8851 if (offset & 3) {
8852 /* adjustments to start on required 4 byte boundary */
8853 b_offset = offset & 3;
8854 b_count = 4 - b_offset;
8855 if (b_count > len) {
8856 /* i.e. offset=1 len=2 */
8857 b_count = len;
8858 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008859 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008860 if (ret)
8861 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008862 memcpy(data, ((char*)&val) + b_offset, b_count);
8863 len -= b_count;
8864 offset += b_count;
8865 eeprom->len += b_count;
8866 }
8867
8868	/* read bytes up to the last 4-byte boundary */
8869 pd = &data[eeprom->len];
8870 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008871 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008872 if (ret) {
8873 eeprom->len += i;
8874 return ret;
8875 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008876 memcpy(pd + i, &val, 4);
8877 }
8878 eeprom->len += i;
8879
8880 if (len & 3) {
8881 /* read last bytes not ending on 4 byte boundary */
8882 pd = &data[eeprom->len];
8883 b_count = len & 3;
8884 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008885 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008886 if (ret)
8887 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008888 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008889 eeprom->len += b_count;
8890 }
8891 return 0;
8892}
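/* Worked example of the alignment handling in tg3_get_eeprom(): a
 * request with offset=5, len=10 (bytes 5..14) becomes one word read at
 * offset 4 keeping bytes 5..7, one aligned word read at offset 8, and
 * one word read at offset 12 keeping bytes 12..14, so every NVRAM
 * access stays on a 4-byte boundary.
 */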
8893
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008894static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008895
8896static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8897{
8898 struct tg3 *tp = netdev_priv(dev);
8899 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008900 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008901 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08008902 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008903
Michael Chanbc1c7562006-03-20 17:48:03 -08008904 if (tp->link_config.phy_is_low_power)
8905 return -EAGAIN;
8906
Linus Torvalds1da177e2005-04-16 15:20:36 -07008907 if (eeprom->magic != TG3_EEPROM_MAGIC)
8908 return -EINVAL;
8909
8910 offset = eeprom->offset;
8911 len = eeprom->len;
8912
8913 if ((b_offset = (offset & 3))) {
8914		/* adjustments to start on the required 4-byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08008915 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008916 if (ret)
8917 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008918 len += b_offset;
8919 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07008920 if (len < 4)
8921 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008922 }
8923
8924 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07008925 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008926		/* adjustments to end on the required 4-byte boundary */
8927 odd_len = 1;
8928 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08008929 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008930 if (ret)
8931 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008932 }
8933
8934 buf = data;
8935 if (b_offset || odd_len) {
8936 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008937 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008938 return -ENOMEM;
8939 if (b_offset)
8940 memcpy(buf, &start, 4);
8941 if (odd_len)
8942 memcpy(buf+len-4, &end, 4);
8943 memcpy(buf + b_offset, data, eeprom->len);
8944 }
8945
8946 ret = tg3_nvram_write_block(tp, offset, len, buf);
8947
8948 if (buf != data)
8949 kfree(buf);
8950
8951 return ret;
8952}
8953
8954static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8955{
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008956 struct tg3 *tp = netdev_priv(dev);
8957
8958 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8959 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8960 return -EAGAIN;
Lennert Buytenhek298cf9be2008-10-08 16:29:57 -07008961 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008962 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008963
Linus Torvalds1da177e2005-04-16 15:20:36 -07008964 cmd->supported = (SUPPORTED_Autoneg);
8965
8966 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8967 cmd->supported |= (SUPPORTED_1000baseT_Half |
8968 SUPPORTED_1000baseT_Full);
8969
Karsten Keilef348142006-05-12 12:49:08 -07008970 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008971 cmd->supported |= (SUPPORTED_100baseT_Half |
8972 SUPPORTED_100baseT_Full |
8973 SUPPORTED_10baseT_Half |
8974 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08008975 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07008976 cmd->port = PORT_TP;
8977 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008978 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07008979 cmd->port = PORT_FIBRE;
8980 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008981
Linus Torvalds1da177e2005-04-16 15:20:36 -07008982 cmd->advertising = tp->link_config.advertising;
8983 if (netif_running(dev)) {
8984 cmd->speed = tp->link_config.active_speed;
8985 cmd->duplex = tp->link_config.active_duplex;
8986 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008987 cmd->phy_address = PHY_ADDR;
8988 cmd->transceiver = 0;
8989 cmd->autoneg = tp->link_config.autoneg;
8990 cmd->maxtxpkt = 0;
8991 cmd->maxrxpkt = 0;
8992 return 0;
8993}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008994
Linus Torvalds1da177e2005-04-16 15:20:36 -07008995static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8996{
8997 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008998
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07008999 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9000 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9001 return -EAGAIN;
Lennert Buytenhek298cf9be2008-10-08 16:29:57 -07009002 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009003 }
9004
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009005 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009006 /* These are the only valid advertisement bits allowed. */
9007 if (cmd->autoneg == AUTONEG_ENABLE &&
9008 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9009 ADVERTISED_1000baseT_Full |
9010 ADVERTISED_Autoneg |
9011 ADVERTISED_FIBRE)))
9012 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07009013 /* Fiber can only do SPEED_1000. */
9014 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9015 (cmd->speed != SPEED_1000))
9016 return -EINVAL;
9017 /* Copper cannot force SPEED_1000. */
9018 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9019 (cmd->speed == SPEED_1000))
9020 return -EINVAL;
9021 else if ((cmd->speed == SPEED_1000) &&
Matt Carlson0ba11fb2008-06-09 15:40:26 -07009022 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
Michael Chan37ff2382005-10-26 15:49:51 -07009023 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009024
David S. Millerf47c11e2005-06-24 20:18:35 -07009025 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009026
9027 tp->link_config.autoneg = cmd->autoneg;
9028 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07009029 tp->link_config.advertising = (cmd->advertising |
9030 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009031 tp->link_config.speed = SPEED_INVALID;
9032 tp->link_config.duplex = DUPLEX_INVALID;
9033 } else {
9034 tp->link_config.advertising = 0;
9035 tp->link_config.speed = cmd->speed;
9036 tp->link_config.duplex = cmd->duplex;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009037 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009038
Michael Chan24fcad62006-12-17 17:06:46 -08009039 tp->link_config.orig_speed = tp->link_config.speed;
9040 tp->link_config.orig_duplex = tp->link_config.duplex;
9041 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9042
Linus Torvalds1da177e2005-04-16 15:20:36 -07009043 if (netif_running(dev))
9044 tg3_setup_phy(tp, 1);
9045
David S. Millerf47c11e2005-06-24 20:18:35 -07009046 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009047
Linus Torvalds1da177e2005-04-16 15:20:36 -07009048 return 0;
9049}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009050
Linus Torvalds1da177e2005-04-16 15:20:36 -07009051static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9052{
9053 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009054
Linus Torvalds1da177e2005-04-16 15:20:36 -07009055 strcpy(info->driver, DRV_MODULE_NAME);
9056 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08009057 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009058 strcpy(info->bus_info, pci_name(tp->pdev));
9059}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009060
Linus Torvalds1da177e2005-04-16 15:20:36 -07009061static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9062{
9063 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009064
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009065 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9066 device_can_wakeup(&tp->pdev->dev))
Gary Zambranoa85feb82007-05-05 11:52:19 -07009067 wol->supported = WAKE_MAGIC;
9068 else
9069 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009070 wol->wolopts = 0;
9071 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9072 wol->wolopts = WAKE_MAGIC;
9073 memset(&wol->sopass, 0, sizeof(wol->sopass));
9074}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009075
Linus Torvalds1da177e2005-04-16 15:20:36 -07009076static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9077{
9078 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009079 struct device *dp = &tp->pdev->dev;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009080
Linus Torvalds1da177e2005-04-16 15:20:36 -07009081 if (wol->wolopts & ~WAKE_MAGIC)
9082 return -EINVAL;
9083 if ((wol->wolopts & WAKE_MAGIC) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009084 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009085 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009086
David S. Millerf47c11e2005-06-24 20:18:35 -07009087 spin_lock_bh(&tp->lock);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009088 if (wol->wolopts & WAKE_MAGIC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009089 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009090 device_set_wakeup_enable(dp, true);
9091 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009092 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
Rafael J. Wysocki12dac072008-07-30 16:37:33 -07009093 device_set_wakeup_enable(dp, false);
9094 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009095 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009096
Linus Torvalds1da177e2005-04-16 15:20:36 -07009097 return 0;
9098}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009099
Linus Torvalds1da177e2005-04-16 15:20:36 -07009100static u32 tg3_get_msglevel(struct net_device *dev)
9101{
9102 struct tg3 *tp = netdev_priv(dev);
9103 return tp->msg_enable;
9104}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009105
Linus Torvalds1da177e2005-04-16 15:20:36 -07009106static void tg3_set_msglevel(struct net_device *dev, u32 value)
9107{
9108 struct tg3 *tp = netdev_priv(dev);
9109 tp->msg_enable = value;
9110}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009111
Linus Torvalds1da177e2005-04-16 15:20:36 -07009112static int tg3_set_tso(struct net_device *dev, u32 value)
9113{
9114 struct tg3 *tp = netdev_priv(dev);
9115
9116 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9117 if (value)
9118 return -EINVAL;
9119 return 0;
9120 }
Michael Chanb5d37722006-09-27 16:06:21 -07009121 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9122 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009123 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07009124 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -07009125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9126 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9127 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -07009129 dev->features |= NETIF_F_TSO_ECN;
9130 } else
9131 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07009132 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009133 return ethtool_op_set_tso(dev, value);
9134}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009135
Linus Torvalds1da177e2005-04-16 15:20:36 -07009136static int tg3_nway_reset(struct net_device *dev)
9137{
9138 struct tg3 *tp = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009139 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009140
Linus Torvalds1da177e2005-04-16 15:20:36 -07009141 if (!netif_running(dev))
9142 return -EAGAIN;
9143
Michael Chanc94e3942005-09-27 12:12:42 -07009144 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9145 return -EINVAL;
9146
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009147 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9148 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9149 return -EAGAIN;
Lennert Buytenhek298cf9be2008-10-08 16:29:57 -07009150 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009151 } else {
9152 u32 bmcr;
9153
9154 spin_lock_bh(&tp->lock);
9155 r = -EINVAL;
9156 tg3_readphy(tp, MII_BMCR, &bmcr);
9157 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9158 ((bmcr & BMCR_ANENABLE) ||
9159 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9160 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9161 BMCR_ANENABLE);
9162 r = 0;
9163 }
9164 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009165 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009166
Linus Torvalds1da177e2005-04-16 15:20:36 -07009167 return r;
9168}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009169
Linus Torvalds1da177e2005-04-16 15:20:36 -07009170static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9171{
9172 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009173
Linus Torvalds1da177e2005-04-16 15:20:36 -07009174 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9175 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009176 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9177 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9178 else
9179 ering->rx_jumbo_max_pending = 0;
9180
9181 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009182
9183 ering->rx_pending = tp->rx_pending;
9184 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08009185 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9186 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9187 else
9188 ering->rx_jumbo_pending = 0;
9189
Linus Torvalds1da177e2005-04-16 15:20:36 -07009190 ering->tx_pending = tp->tx_pending;
9191}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009192
Linus Torvalds1da177e2005-04-16 15:20:36 -07009193static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9194{
9195 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009196 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009197
Linus Torvalds1da177e2005-04-16 15:20:36 -07009198 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9199 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07009200 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9201 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08009202 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07009203 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009204 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009205
Michael Chanbbe832c2005-06-24 20:20:04 -07009206 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009207 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009208 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009209 irq_sync = 1;
9210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009211
Michael Chanbbe832c2005-06-24 20:20:04 -07009212 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009213
Linus Torvalds1da177e2005-04-16 15:20:36 -07009214 tp->rx_pending = ering->rx_pending;
9215
9216 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9217 tp->rx_pending > 63)
9218 tp->rx_pending = 63;
9219 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9220 tp->tx_pending = ering->tx_pending;
9221
9222 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07009223 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07009224 err = tg3_restart_hw(tp, 1);
9225 if (!err)
9226 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009227 }
9228
David S. Millerf47c11e2005-06-24 20:18:35 -07009229 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009230
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009231 if (irq_sync && !err)
9232 tg3_phy_start(tp);
9233
Michael Chanb9ec6c12006-07-25 16:37:27 -07009234 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009235}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009236
Linus Torvalds1da177e2005-04-16 15:20:36 -07009237static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9238{
9239 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009240
Linus Torvalds1da177e2005-04-16 15:20:36 -07009241 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08009242
9243 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9244 epause->rx_pause = 1;
9245 else
9246 epause->rx_pause = 0;
9247
9248 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9249 epause->tx_pause = 1;
9250 else
9251 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009252}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009253
Linus Torvalds1da177e2005-04-16 15:20:36 -07009254static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9255{
9256 struct tg3 *tp = netdev_priv(dev);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009257 int err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009258
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009259 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9260 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9261 return -EAGAIN;
9262
9263 if (epause->autoneg) {
9264 u32 newadv;
9265 struct phy_device *phydev;
9266
Lennert Buytenhek298cf9be2008-10-08 16:29:57 -07009267 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
Matt Carlsonb02fd9e2008-05-25 23:47:41 -07009268
9269 if (epause->rx_pause) {
9270 if (epause->tx_pause)
9271 newadv = ADVERTISED_Pause;
9272 else
9273 newadv = ADVERTISED_Pause |
9274 ADVERTISED_Asym_Pause;
9275 } else if (epause->tx_pause) {
9276 newadv = ADVERTISED_Asym_Pause;
9277 } else
9278 newadv = 0;
9279
9280 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9281 u32 oldadv = phydev->advertising &
9282 (ADVERTISED_Pause |
9283 ADVERTISED_Asym_Pause);
9284 if (oldadv != newadv) {
9285 phydev->advertising &=
9286 ~(ADVERTISED_Pause |
9287 ADVERTISED_Asym_Pause);
9288 phydev->advertising |= newadv;
9289 err = phy_start_aneg(phydev);
9290 }
9291 } else {
9292 tp->link_config.advertising &=
9293 ~(ADVERTISED_Pause |
9294 ADVERTISED_Asym_Pause);
9295 tp->link_config.advertising |= newadv;
9296 }
9297 } else {
9298 if (epause->rx_pause)
9299 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9300 else
9301 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9302
9303 if (epause->tx_pause)
9304 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9305 else
9306 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9307
9308 if (netif_running(dev))
9309 tg3_setup_flow_control(tp, 0, 0);
9310 }
9311 } else {
9312 int irq_sync = 0;
9313
9314 if (netif_running(dev)) {
9315 tg3_netif_stop(tp);
9316 irq_sync = 1;
9317 }
9318
9319 tg3_full_lock(tp, irq_sync);
9320
9321 if (epause->autoneg)
9322 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9323 else
9324 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9325 if (epause->rx_pause)
9326 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9327 else
9328 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9329 if (epause->tx_pause)
9330 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9331 else
9332 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9333
9334 if (netif_running(dev)) {
9335 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9336 err = tg3_restart_hw(tp, 1);
9337 if (!err)
9338 tg3_netif_start(tp);
9339 }
9340
9341 tg3_full_unlock(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07009342 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009343
Michael Chanb9ec6c12006-07-25 16:37:27 -07009344 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009345}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009346
Linus Torvalds1da177e2005-04-16 15:20:36 -07009347static u32 tg3_get_rx_csum(struct net_device *dev)
9348{
9349 struct tg3 *tp = netdev_priv(dev);
9350 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9351}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009352
Linus Torvalds1da177e2005-04-16 15:20:36 -07009353static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9354{
9355 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009356
Linus Torvalds1da177e2005-04-16 15:20:36 -07009357 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9358 if (data != 0)
9359 return -EINVAL;
9360 return 0;
9361 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009362
David S. Millerf47c11e2005-06-24 20:18:35 -07009363 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009364 if (data)
9365 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9366 else
9367 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07009368 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009369
Linus Torvalds1da177e2005-04-16 15:20:36 -07009370 return 0;
9371}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009372
Linus Torvalds1da177e2005-04-16 15:20:36 -07009373static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9374{
9375 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009376
Linus Torvalds1da177e2005-04-16 15:20:36 -07009377 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9378 if (data != 0)
9379 return -EINVAL;
9380 return 0;
9381 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009382
Michael Chanaf36e6b2006-03-23 01:28:06 -08009383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009386 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9387 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan6460d942007-07-14 19:07:52 -07009388 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009389 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08009390 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009391
9392 return 0;
9393}
9394
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009395static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009396{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009397 switch (sset) {
9398 case ETH_SS_TEST:
9399 return TG3_NUM_TEST;
9400 case ETH_SS_STATS:
9401 return TG3_NUM_STATS;
9402 default:
9403 return -EOPNOTSUPP;
9404 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07009405}
9406
Linus Torvalds1da177e2005-04-16 15:20:36 -07009407static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9408{
9409 switch (stringset) {
9410 case ETH_SS_STATS:
9411 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9412 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07009413 case ETH_SS_TEST:
9414 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9415 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009416 default:
9417 WARN_ON(1); /* we need a WARN() */
9418 break;
9419 }
9420}
9421
Michael Chan4009a932005-09-05 17:52:54 -07009422static int tg3_phys_id(struct net_device *dev, u32 data)
9423{
9424 struct tg3 *tp = netdev_priv(dev);
9425 int i;
9426
9427 if (!netif_running(tp->dev))
9428 return -EAGAIN;
9429
9430 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08009431 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07009432
9433 for (i = 0; i < (data * 2); i++) {
9434 if ((i % 2) == 0)
9435 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9436 LED_CTRL_1000MBPS_ON |
9437 LED_CTRL_100MBPS_ON |
9438 LED_CTRL_10MBPS_ON |
9439 LED_CTRL_TRAFFIC_OVERRIDE |
9440 LED_CTRL_TRAFFIC_BLINK |
9441 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009442
Michael Chan4009a932005-09-05 17:52:54 -07009443 else
9444 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9445 LED_CTRL_TRAFFIC_OVERRIDE);
9446
9447 if (msleep_interruptible(500))
9448 break;
9449 }
9450 tw32(MAC_LED_CTRL, tp->led_ctrl);
9451 return 0;
9452}
9453
Linus Torvalds1da177e2005-04-16 15:20:36 -07009454static void tg3_get_ethtool_stats (struct net_device *dev,
9455 struct ethtool_stats *estats, u64 *tmp_stats)
9456{
9457 struct tg3 *tp = netdev_priv(dev);
9458 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9459}
9460
Michael Chan566f86a2005-05-29 14:56:58 -07009461#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08009462#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9463#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9464#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07009465#define NVRAM_SELFBOOT_HW_SIZE 0x20
9466#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07009467
9468static int tg3_test_nvram(struct tg3 *tp)
9469{
Al Virob9fc7dc2007-12-17 22:59:57 -08009470 u32 csum, magic;
9471 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01009472 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07009473
Michael Chan18201802006-03-20 22:29:15 -08009474 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009475 return -EIO;
9476
Michael Chan1b277772006-03-20 22:27:48 -08009477 if (magic == TG3_EEPROM_MAGIC)
9478 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07009479 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08009480 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9481 TG3_EEPROM_SB_FORMAT_1) {
9482 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9483 case TG3_EEPROM_SB_REVISION_0:
9484 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9485 break;
9486 case TG3_EEPROM_SB_REVISION_2:
9487 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9488 break;
9489 case TG3_EEPROM_SB_REVISION_3:
9490 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9491 break;
9492 default:
9493 return 0;
9494 }
9495 } else
Michael Chan1b277772006-03-20 22:27:48 -08009496 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07009497 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9498 size = NVRAM_SELFBOOT_HW_SIZE;
9499 else
Michael Chan1b277772006-03-20 22:27:48 -08009500 return -EIO;
9501
9502 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009503 if (buf == NULL)
9504 return -ENOMEM;
9505
Michael Chan1b277772006-03-20 22:27:48 -08009506 err = -EIO;
9507 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009508 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009509 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009510 }
Michael Chan1b277772006-03-20 22:27:48 -08009511 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009512 goto out;
9513
Michael Chan1b277772006-03-20 22:27:48 -08009514 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009515 magic = swab32(le32_to_cpu(buf[0]));
9516 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009517 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009518 u8 *buf8 = (u8 *) buf, csum8 = 0;
9519
Al Virob9fc7dc2007-12-17 22:59:57 -08009520 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009521 TG3_EEPROM_SB_REVISION_2) {
9522 /* For rev 2, the csum doesn't include the MBA. */
9523 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9524 csum8 += buf8[i];
9525 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9526 csum8 += buf8[i];
9527 } else {
9528 for (i = 0; i < size; i++)
9529 csum8 += buf8[i];
9530 }
Michael Chan1b277772006-03-20 22:27:48 -08009531
Adrian Bunkad96b482006-04-05 22:21:04 -07009532 if (csum8 == 0) {
9533 err = 0;
9534 goto out;
9535 }
9536
9537 err = -EIO;
9538 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009539 }
Michael Chan566f86a2005-05-29 14:56:58 -07009540
Al Virob9fc7dc2007-12-17 22:59:57 -08009541 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009542 TG3_EEPROM_MAGIC_HW) {
9543 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9544 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9545 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009546
9547 /* Separate the parity bits and the data bytes. */
9548 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9549 if ((i == 0) || (i == 8)) {
9550 int l;
9551 u8 msk;
9552
9553 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9554 parity[k++] = buf8[i] & msk;
9555 i++;
9556 }
9557 else if (i == 16) {
9558 int l;
9559 u8 msk;
9560
9561 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9562 parity[k++] = buf8[i] & msk;
9563 i++;
9564
9565 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9566 parity[k++] = buf8[i] & msk;
9567 i++;
9568 }
9569 data[j++] = buf8[i];
9570 }
9571
9572 err = -EIO;
9573 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9574 u8 hw8 = hweight8(data[i]);
9575
9576 if ((hw8 & 0x1) && parity[i])
9577 goto out;
9578 else if (!(hw8 & 0x1) && !parity[i])
9579 goto out;
9580 }
9581 err = 0;
9582 goto out;
9583 }
9584
Michael Chan566f86a2005-05-29 14:56:58 -07009585 /* Bootstrap checksum at offset 0x10 */
9586 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009587	if (csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009588 goto out;
9589
9590 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9591 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009592 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009593 goto out;
9594
9595 err = 0;
9596
9597out:
9598 kfree(buf);
9599 return err;
9600}
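/* Summary of the NVRAM self-test above: legacy images are validated by
 * two CRC-32 values (one stored at offset 0x10 covering the first 16
 * bytes, one at 0xfc covering the 0x88-byte manufacturing block that
 * starts at 0x74); selfboot format-1 images must have an 8-bit byte
 * checksum of zero (skipping the MBA word for revision 2); hardware
 * selfboot images carry per-byte parity bits, where the parity bit
 * must be set exactly when the data byte has an even number of ones.
 */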
9601
Michael Chanca430072005-05-29 14:57:23 -07009602#define TG3_SERDES_TIMEOUT_SEC 2
9603#define TG3_COPPER_TIMEOUT_SEC 6
9604
9605static int tg3_test_link(struct tg3 *tp)
9606{
9607 int i, max;
9608
9609 if (!netif_running(tp->dev))
9610 return -ENODEV;
9611
Michael Chan4c987482005-09-05 17:52:38 -07009612 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009613 max = TG3_SERDES_TIMEOUT_SEC;
9614 else
9615 max = TG3_COPPER_TIMEOUT_SEC;
9616
9617 for (i = 0; i < max; i++) {
9618 if (netif_carrier_ok(tp->dev))
9619 return 0;
9620
9621 if (msleep_interruptible(1000))
9622 break;
9623 }
9624
9625 return -EIO;
9626}
9627
Michael Chana71116d2005-05-29 14:58:11 -07009628/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009629static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009630{
Michael Chanb16250e2006-09-27 16:10:14 -07009631 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009632 u32 offset, read_mask, write_mask, val, save_val, read_val;
9633 static struct {
9634 u16 offset;
9635 u16 flags;
9636#define TG3_FL_5705 0x1
9637#define TG3_FL_NOT_5705 0x2
9638#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009639#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009640 u32 read_mask;
9641 u32 write_mask;
9642 } reg_tbl[] = {
9643 /* MAC Control Registers */
9644 { MAC_MODE, TG3_FL_NOT_5705,
9645 0x00000000, 0x00ef6f8c },
9646 { MAC_MODE, TG3_FL_5705,
9647 0x00000000, 0x01ef6b8c },
9648 { MAC_STATUS, TG3_FL_NOT_5705,
9649 0x03800107, 0x00000000 },
9650 { MAC_STATUS, TG3_FL_5705,
9651 0x03800100, 0x00000000 },
9652 { MAC_ADDR_0_HIGH, 0x0000,
9653 0x00000000, 0x0000ffff },
9654 { MAC_ADDR_0_LOW, 0x0000,
9655 0x00000000, 0xffffffff },
9656 { MAC_RX_MTU_SIZE, 0x0000,
9657 0x00000000, 0x0000ffff },
9658 { MAC_TX_MODE, 0x0000,
9659 0x00000000, 0x00000070 },
9660 { MAC_TX_LENGTHS, 0x0000,
9661 0x00000000, 0x00003fff },
9662 { MAC_RX_MODE, TG3_FL_NOT_5705,
9663 0x00000000, 0x000007fc },
9664 { MAC_RX_MODE, TG3_FL_5705,
9665 0x00000000, 0x000007dc },
9666 { MAC_HASH_REG_0, 0x0000,
9667 0x00000000, 0xffffffff },
9668 { MAC_HASH_REG_1, 0x0000,
9669 0x00000000, 0xffffffff },
9670 { MAC_HASH_REG_2, 0x0000,
9671 0x00000000, 0xffffffff },
9672 { MAC_HASH_REG_3, 0x0000,
9673 0x00000000, 0xffffffff },
9674
9675 /* Receive Data and Receive BD Initiator Control Registers. */
9676 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9677 0x00000000, 0xffffffff },
9678 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9679 0x00000000, 0xffffffff },
9680 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9681 0x00000000, 0x00000003 },
9682 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9683 0x00000000, 0xffffffff },
9684 { RCVDBDI_STD_BD+0, 0x0000,
9685 0x00000000, 0xffffffff },
9686 { RCVDBDI_STD_BD+4, 0x0000,
9687 0x00000000, 0xffffffff },
9688 { RCVDBDI_STD_BD+8, 0x0000,
9689 0x00000000, 0xffff0002 },
9690 { RCVDBDI_STD_BD+0xc, 0x0000,
9691 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009692
Michael Chana71116d2005-05-29 14:58:11 -07009693 /* Receive BD Initiator Control Registers. */
9694 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9695 0x00000000, 0xffffffff },
9696 { RCVBDI_STD_THRESH, TG3_FL_5705,
9697 0x00000000, 0x000003ff },
9698 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9699 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009700
Michael Chana71116d2005-05-29 14:58:11 -07009701 /* Host Coalescing Control Registers. */
9702 { HOSTCC_MODE, TG3_FL_NOT_5705,
9703 0x00000000, 0x00000004 },
9704 { HOSTCC_MODE, TG3_FL_5705,
9705 0x00000000, 0x000000f6 },
9706 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9707 0x00000000, 0xffffffff },
9708 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9709 0x00000000, 0x000003ff },
9710 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9711 0x00000000, 0xffffffff },
9712 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9713 0x00000000, 0x000003ff },
9714 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9715 0x00000000, 0xffffffff },
9716 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9717 0x00000000, 0x000000ff },
9718 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9719 0x00000000, 0xffffffff },
9720 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9721 0x00000000, 0x000000ff },
9722 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9723 0x00000000, 0xffffffff },
9724 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9725 0x00000000, 0xffffffff },
9726 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9727 0x00000000, 0xffffffff },
9728 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9729 0x00000000, 0x000000ff },
9730 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9731 0x00000000, 0xffffffff },
9732 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9733 0x00000000, 0x000000ff },
9734 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9735 0x00000000, 0xffffffff },
9736 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9737 0x00000000, 0xffffffff },
9738 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9739 0x00000000, 0xffffffff },
9740 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9741 0x00000000, 0xffffffff },
9742 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9743 0x00000000, 0xffffffff },
9744 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9745 0xffffffff, 0x00000000 },
9746 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9747 0xffffffff, 0x00000000 },
9748
9749 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009750 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009751 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009752 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009753 0x00000000, 0x007fffff },
9754 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9755 0x00000000, 0x0000003f },
9756 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9757 0x00000000, 0x000001ff },
9758 { BUFMGR_MB_HIGH_WATER, 0x0000,
9759 0x00000000, 0x000001ff },
9760 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9761 0xffffffff, 0x00000000 },
9762 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9763 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009764
Michael Chana71116d2005-05-29 14:58:11 -07009765 /* Mailbox Registers */
9766 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9767 0x00000000, 0x000001ff },
9768 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9769 0x00000000, 0x000001ff },
9770 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9771 0x00000000, 0x000007ff },
9772 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9773 0x00000000, 0x000001ff },
9774
9775 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9776 };
9777
Michael Chanb16250e2006-09-27 16:10:14 -07009778 is_5705 = is_5750 = 0;
9779 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009780 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009781 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9782 is_5750 = 1;
9783 }
Michael Chana71116d2005-05-29 14:58:11 -07009784
9785 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9786 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9787 continue;
9788
9789 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9790 continue;
9791
9792 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9793 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9794 continue;
9795
Michael Chanb16250e2006-09-27 16:10:14 -07009796 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9797 continue;
9798
Michael Chana71116d2005-05-29 14:58:11 -07009799 offset = (u32) reg_tbl[i].offset;
9800 read_mask = reg_tbl[i].read_mask;
9801 write_mask = reg_tbl[i].write_mask;
9802
9803 /* Save the original register content */
9804 save_val = tr32(offset);
9805
9806 /* Determine the read-only value. */
9807 read_val = save_val & read_mask;
9808
9809 /* Write zero to the register, then make sure the read-only bits
9810 * are not changed and the read/write bits are all zeros.
9811 */
9812 tw32(offset, 0);
9813
9814 val = tr32(offset);
9815
9816 /* Test the read-only and read/write bits. */
9817 if (((val & read_mask) != read_val) || (val & write_mask))
9818 goto out;
9819
9820 /* Write ones to all the bits defined by RdMask and WrMask, then
9821 * make sure the read-only bits are not changed and the
9822 * read/write bits are all ones.
9823 */
9824 tw32(offset, read_mask | write_mask);
9825
9826 val = tr32(offset);
9827
9828 /* Test the read-only bits. */
9829 if ((val & read_mask) != read_val)
9830 goto out;
9831
9832 /* Test the read/write bits. */
9833 if ((val & write_mask) != write_mask)
9834 goto out;
9835
9836 tw32(offset, save_val);
9837 }
9838
9839 return 0;
9840
9841out:
Michael Chan9f88f292006-12-07 00:22:54 -08009842 if (netif_msg_hw(tp))
9843 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9844 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009845 tw32(offset, save_val);
9846 return -EIO;
9847}
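/* How the table-driven register test works: for each reg_tbl[] entry,
 * read_mask marks bits that must keep their value (read-only) and
 * write_mask marks bits that must accept writes.  The test writes
 * all-zeros and then all-ones-under-the-masks, checking after each
 * write that the read-only bits still hold the saved value and the
 * writable bits read back exactly what was written, before restoring
 * the original register contents.  For example, the entry
 *   { MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff }
 * declares no read-only bits and 16 writable low-order bits, so only
 * those 16 bits are exercised for that register.
 */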
9848
Michael Chan7942e1d2005-05-29 14:58:36 -07009849static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9850{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009851 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009852 int i;
9853 u32 j;
9854
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009855 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009856 for (j = 0; j < len; j += 4) {
9857 u32 val;
9858
9859 tg3_write_mem(tp, offset + j, test_pattern[i]);
9860 tg3_read_mem(tp, offset + j, &val);
9861 if (val != test_pattern[i])
9862 return -EIO;
9863 }
9864 }
9865 return 0;
9866}
9867
9868static int tg3_test_memory(struct tg3 *tp)
9869{
9870 static struct mem_entry {
9871 u32 offset;
9872 u32 len;
9873 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009874 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009875 { 0x00002000, 0x1c000},
9876 { 0xffffffff, 0x00000}
9877 }, mem_tbl_5705[] = {
9878 { 0x00000100, 0x0000c},
9879 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009880 { 0x00004000, 0x00800},
9881 { 0x00006000, 0x01000},
9882 { 0x00008000, 0x02000},
9883 { 0x00010000, 0x0e000},
9884 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009885 }, mem_tbl_5755[] = {
9886 { 0x00000200, 0x00008},
9887 { 0x00004000, 0x00800},
9888 { 0x00006000, 0x00800},
9889 { 0x00008000, 0x02000},
9890 { 0x00010000, 0x0c000},
9891 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009892 }, mem_tbl_5906[] = {
9893 { 0x00000200, 0x00008},
9894 { 0x00004000, 0x00400},
9895 { 0x00006000, 0x00400},
9896 { 0x00008000, 0x01000},
9897 { 0x00010000, 0x01000},
9898 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009899 };
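/* Each table above lists { offset, len } pairs of internal SRAM
 * regions to pattern-test; a sentinel offset of 0xffffffff ends
 * the list.
 */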
9900 struct mem_entry *mem_tbl;
9901 int err = 0;
9902 int i;
9903
Michael Chan79f4d132006-03-20 22:28:57 -08009904 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08009905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -07009908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9909 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan79f4d132006-03-20 22:28:57 -08009910 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -07009911 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9912 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -08009913 else
9914 mem_tbl = mem_tbl_5705;
9915 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07009916 mem_tbl = mem_tbl_570x;
9917
9918 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9919 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9920 mem_tbl[i].len)) != 0)
9921 break;
9922 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009923
Michael Chan7942e1d2005-05-29 14:58:36 -07009924 return err;
9925}
9926
Michael Chan9f40dea2005-09-05 17:53:06 -07009927#define TG3_MAC_LOOPBACK 0
9928#define TG3_PHY_LOOPBACK 1
9929
9930static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07009931{
Michael Chan9f40dea2005-09-05 17:53:06 -07009932 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07009933 u32 desc_idx;
9934 struct sk_buff *skb, *rx_skb;
9935 u8 *tx_data;
9936 dma_addr_t map;
9937 int num_pkts, tx_len, rx_len, i, err;
9938 struct tg3_rx_buffer_desc *desc;
9939
Michael Chan9f40dea2005-09-05 17:53:06 -07009940 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07009941 /* HW errata - mac loopback fails in some cases on 5780.
9942 * Normal traffic and PHY loopback are not affected by
 9943 * this erratum.
9944 */
9945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9946 return 0;
9947
Michael Chan9f40dea2005-09-05 17:53:06 -07009948 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009949 MAC_MODE_PORT_INT_LPBACK;
9950 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9951 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -07009952 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9953 mac_mode |= MAC_MODE_PORT_MODE_MII;
9954 else
9955 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -07009956 tw32(MAC_MODE, mac_mode);
9957 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -07009958 u32 val;
9959
Michael Chanb16250e2006-09-27 16:10:14 -07009960 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9961 u32 phytest;
9962
9963 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9964 u32 phy;
9965
9966 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9967 phytest | MII_TG3_EPHY_SHADOW_EN);
9968 if (!tg3_readphy(tp, 0x1b, &phy))
9969 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -07009970 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9971 }
Michael Chan5d64ad32006-12-07 00:19:40 -08009972 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9973 } else
9974 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -07009975
Matt Carlson9ef8ca92007-07-11 19:48:29 -07009976 tg3_phy_toggle_automdix(tp, 0);
9977
Michael Chan3f7045c2006-09-27 16:02:29 -07009978 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -07009979 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -08009980
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009981 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -08009982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -07009983 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -08009984 mac_mode |= MAC_MODE_PORT_MODE_MII;
9985 } else
9986 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -07009987
Michael Chanc94e3942005-09-27 12:12:42 -07009988 /* reset to prevent losing 1st rx packet intermittently */
9989 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9990 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9991 udelay(10);
9992 tw32_f(MAC_RX_MODE, tp->rx_mode);
9993 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9995 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9996 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9997 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9998 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -08009999 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10000 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10001 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010002 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -070010003 } else
10005 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -070010006
10007 err = -EIO;
10008
Michael Chanc76949a2005-05-29 14:58:59 -070010009 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -070010010 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -070010011 if (!skb)
10012 return -ENOMEM;
10013
Michael Chanc76949a2005-05-29 14:58:59 -070010014 tx_data = skb_put(skb, tx_len);
10015 memcpy(tx_data, tp->dev->dev_addr, 6);
10016 memset(tx_data + 6, 0x0, 8);
10017
10018 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10019
10020 for (i = 14; i < tx_len; i++)
10021 tx_data[i] = (u8) (i & 0xff);
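/* The completed test frame carries our own MAC address as the
 * destination (bytes 0-5), zeroed source and type fields (bytes
 * 6-13) and an incrementing byte pattern as payload, which the
 * receive side checks once the packet loops back.
 */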
10022
10023 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10024
10025 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10026 HOSTCC_MODE_NOW);
10027
10028 udelay(10);
10029
10030 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10031
Michael Chanc76949a2005-05-29 14:58:59 -070010032 num_pkts = 0;
10033
Michael Chan9f40dea2005-09-05 17:53:06 -070010034 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -070010035
Michael Chan9f40dea2005-09-05 17:53:06 -070010036 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -070010037 num_pkts++;
10038
Michael Chan9f40dea2005-09-05 17:53:06 -070010039 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10040 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -070010041 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -070010042
10043 udelay(10);
10044
Michael Chan3f7045c2006-09-27 16:02:29 -070010045 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10046 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -070010047 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10048 HOSTCC_MODE_NOW);
10049
10050 udelay(10);
10051
10052 tx_idx = tp->hw_status->idx[0].tx_consumer;
10053 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -070010054 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -070010055 (rx_idx == (rx_start_idx + num_pkts)))
10056 break;
10057 }
10058
10059 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10060 dev_kfree_skb(skb);
10061
Michael Chan9f40dea2005-09-05 17:53:06 -070010062 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -070010063 goto out;
10064
10065 if (rx_idx != rx_start_idx + num_pkts)
10066 goto out;
10067
10068 desc = &tp->rx_rcb[rx_start_idx];
10069 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10070 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10071 if (opaque_key != RXD_OPAQUE_RING_STD)
10072 goto out;
10073
10074 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10075 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10076 goto out;
10077
10078 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10079 if (rx_len != tx_len)
10080 goto out;
10081
10082 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10083
10084 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10085 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10086
10087 for (i = 14; i < tx_len; i++) {
10088 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10089 goto out;
10090 }
10091 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010092
Michael Chanc76949a2005-05-29 14:58:59 -070010093 /* tg3_free_rings will unmap and free the rx_skb */
10094out:
10095 return err;
10096}
10097
Michael Chan9f40dea2005-09-05 17:53:06 -070010098#define TG3_MAC_LOOPBACK_FAILED 1
10099#define TG3_PHY_LOOPBACK_FAILED 2
10100#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10101 TG3_PHY_LOOPBACK_FAILED)
10102
10103static int tg3_test_loopback(struct tg3 *tp)
10104{
10105 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010106 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -070010107
10108 if (!netif_running(tp->dev))
10109 return TG3_LOOPBACK_FAILED;
10110
Michael Chanb9ec6c12006-07-25 16:37:27 -070010111 err = tg3_reset_hw(tp, 1);
10112 if (err)
10113 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -070010114
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010118 int i;
10119 u32 status;
10120
10121 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10122
 10123 /* Wait for up to 40 microseconds to acquire the lock. */
10124 for (i = 0; i < 4; i++) {
10125 status = tr32(TG3_CPMU_MUTEX_GNT);
10126 if (status == CPMU_MUTEX_GNT_DRIVER)
10127 break;
10128 udelay(10);
10129 }
10130
10131 if (status != CPMU_MUTEX_GNT_DRIVER)
10132 return TG3_LOOPBACK_FAILED;
10133
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010134 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -080010135 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -070010136 tw32(TG3_CPMU_CTRL,
10137 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10138 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -070010139 }
10140
Michael Chan9f40dea2005-09-05 17:53:06 -070010141 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10142 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -070010143
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010144 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlson9936bcf2007-10-10 18:03:07 -070010147 tw32(TG3_CPMU_CTRL, cpmuctrl);
10148
10149 /* Release the mutex */
10150 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10151 }
10152
Matt Carlsondd477002008-05-25 23:45:58 -070010153 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10154 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
Michael Chan9f40dea2005-09-05 17:53:06 -070010155 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10156 err |= TG3_PHY_LOOPBACK_FAILED;
10157 }
10158
10159 return err;
10160}
10161
Michael Chan4cafd3f2005-05-29 14:56:34 -070010162static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10163 u64 *data)
10164{
Michael Chan566f86a2005-05-29 14:56:58 -070010165 struct tg3 *tp = netdev_priv(dev);
10166
Michael Chanbc1c7562006-03-20 17:48:03 -080010167 if (tp->link_config.phy_is_low_power)
10168 tg3_set_power_state(tp, PCI_D0);
10169
Michael Chan566f86a2005-05-29 14:56:58 -070010170 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10171
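/* data[] slots filled below: 0 = NVRAM, 1 = link, 2 = registers,
 * 3 = memory, 4 = loopback result mask, 5 = interrupt test.
 */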
10172 if (tg3_test_nvram(tp) != 0) {
10173 etest->flags |= ETH_TEST_FL_FAILED;
10174 data[0] = 1;
10175 }
Michael Chanca430072005-05-29 14:57:23 -070010176 if (tg3_test_link(tp) != 0) {
10177 etest->flags |= ETH_TEST_FL_FAILED;
10178 data[1] = 1;
10179 }
Michael Chana71116d2005-05-29 14:58:11 -070010180 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010181 int err, err2 = 0, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -070010182
Michael Chanbbe832c2005-06-24 20:20:04 -070010183 if (netif_running(dev)) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010184 tg3_phy_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -070010185 tg3_netif_stop(tp);
10186 irq_sync = 1;
10187 }
10188
10189 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -070010190
10191 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -080010192 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010193 tg3_halt_cpu(tp, RX_CPU_BASE);
10194 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10195 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -080010196 if (!err)
10197 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010198
Michael Chand9ab5ad2006-03-20 22:27:35 -080010199 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10200 tg3_phy_reset(tp);
10201
Michael Chana71116d2005-05-29 14:58:11 -070010202 if (tg3_test_registers(tp) != 0) {
10203 etest->flags |= ETH_TEST_FL_FAILED;
10204 data[2] = 1;
10205 }
Michael Chan7942e1d2005-05-29 14:58:36 -070010206 if (tg3_test_memory(tp) != 0) {
10207 etest->flags |= ETH_TEST_FL_FAILED;
10208 data[3] = 1;
10209 }
Michael Chan9f40dea2005-09-05 17:53:06 -070010210 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -070010211 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -070010212
David S. Millerf47c11e2005-06-24 20:18:35 -070010213 tg3_full_unlock(tp);
10214
Michael Chand4bc3922005-05-29 14:59:20 -070010215 if (tg3_test_interrupt(tp) != 0) {
10216 etest->flags |= ETH_TEST_FL_FAILED;
10217 data[5] = 1;
10218 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010219
10220 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -070010221
Michael Chana71116d2005-05-29 14:58:11 -070010222 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10223 if (netif_running(dev)) {
10224 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010225 err2 = tg3_restart_hw(tp, 1);
10226 if (!err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070010227 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010228 }
David S. Millerf47c11e2005-06-24 20:18:35 -070010229
10230 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010231
10232 if (irq_sync && !err2)
10233 tg3_phy_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -070010234 }
Michael Chanbc1c7562006-03-20 17:48:03 -080010235 if (tp->link_config.phy_is_low_power)
10236 tg3_set_power_state(tp, PCI_D3hot);
10237
Michael Chan4cafd3f2005-05-29 14:56:34 -070010238}
10239
Linus Torvalds1da177e2005-04-16 15:20:36 -070010240static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10241{
10242 struct mii_ioctl_data *data = if_mii(ifr);
10243 struct tg3 *tp = netdev_priv(dev);
10244 int err;
10245
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010246 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10247 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10248 return -EAGAIN;
Lennert Buytenhek298cf9be2008-10-08 16:29:57 -070010249 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070010250 }
10251
Linus Torvalds1da177e2005-04-16 15:20:36 -070010252 switch (cmd) {
10253 case SIOCGMIIPHY:
10254 data->phy_id = PHY_ADDR;
10255
10256 /* fallthru */
10257 case SIOCGMIIREG: {
10258 u32 mii_regval;
10259
10260 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10261 break; /* We have no PHY */
10262
Michael Chanbc1c7562006-03-20 17:48:03 -080010263 if (tp->link_config.phy_is_low_power)
10264 return -EAGAIN;
10265
David S. Millerf47c11e2005-06-24 20:18:35 -070010266 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010267 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -070010268 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010269
10270 data->val_out = mii_regval;
10271
10272 return err;
10273 }
10274
10275 case SIOCSMIIREG:
10276 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10277 break; /* We have no PHY */
10278
10279 if (!capable(CAP_NET_ADMIN))
10280 return -EPERM;
10281
Michael Chanbc1c7562006-03-20 17:48:03 -080010282 if (tp->link_config.phy_is_low_power)
10283 return -EAGAIN;
10284
David S. Millerf47c11e2005-06-24 20:18:35 -070010285 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010286 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -070010287 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010288
10289 return err;
10290
10291 default:
10292 /* do nothing */
10293 break;
10294 }
10295 return -EOPNOTSUPP;
10296}
10297
10298#if TG3_VLAN_TAG_USED
10299static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10300{
10301 struct tg3 *tp = netdev_priv(dev);
10302
Michael Chan29315e82006-06-29 20:12:30 -070010303 if (netif_running(dev))
10304 tg3_netif_stop(tp);
10305
David S. Millerf47c11e2005-06-24 20:18:35 -070010306 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010307
10308 tp->vlgrp = grp;
10309
10310 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10311 __tg3_set_rx_mode(dev);
10312
Michael Chan29315e82006-06-29 20:12:30 -070010313 if (netif_running(dev))
10314 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -070010315
10316 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010317}
Linus Torvalds1da177e2005-04-16 15:20:36 -070010318#endif
10319
David S. Miller15f98502005-05-18 22:49:26 -070010320static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10321{
10322 struct tg3 *tp = netdev_priv(dev);
10323
10324 memcpy(ec, &tp->coal, sizeof(*ec));
10325 return 0;
10326}
10327
Michael Chand244c892005-07-05 14:42:33 -070010328static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10329{
10330 struct tg3 *tp = netdev_priv(dev);
10331 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10332 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10333
10334 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10335 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10336 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10337 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10338 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10339 }
10340
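/* On 5705 and later chips the limits above stay at zero, so the
 * checks below reject any non-zero irq-coalescing or statistics
 * tick values on those devices.
 */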
10341 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10342 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10343 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10344 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10345 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10346 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10347 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10348 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10349 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10350 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10351 return -EINVAL;
10352
10353 /* No rx interrupts will be generated if both are zero */
10354 if ((ec->rx_coalesce_usecs == 0) &&
10355 (ec->rx_max_coalesced_frames == 0))
10356 return -EINVAL;
10357
10358 /* No tx interrupts will be generated if both are zero */
10359 if ((ec->tx_coalesce_usecs == 0) &&
10360 (ec->tx_max_coalesced_frames == 0))
10361 return -EINVAL;
10362
10363 /* Only copy relevant parameters, ignore all others. */
10364 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10365 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10366 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10367 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10368 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10369 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10370 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10371 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10372 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10373
10374 if (netif_running(dev)) {
10375 tg3_full_lock(tp, 0);
10376 __tg3_set_coalesce(tp, &tp->coal);
10377 tg3_full_unlock(tp);
10378 }
10379 return 0;
10380}
10381
Jeff Garzik7282d492006-09-13 14:30:00 -040010382static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010383 .get_settings = tg3_get_settings,
10384 .set_settings = tg3_set_settings,
10385 .get_drvinfo = tg3_get_drvinfo,
10386 .get_regs_len = tg3_get_regs_len,
10387 .get_regs = tg3_get_regs,
10388 .get_wol = tg3_get_wol,
10389 .set_wol = tg3_set_wol,
10390 .get_msglevel = tg3_get_msglevel,
10391 .set_msglevel = tg3_set_msglevel,
10392 .nway_reset = tg3_nway_reset,
10393 .get_link = ethtool_op_get_link,
10394 .get_eeprom_len = tg3_get_eeprom_len,
10395 .get_eeprom = tg3_get_eeprom,
10396 .set_eeprom = tg3_set_eeprom,
10397 .get_ringparam = tg3_get_ringparam,
10398 .set_ringparam = tg3_set_ringparam,
10399 .get_pauseparam = tg3_get_pauseparam,
10400 .set_pauseparam = tg3_set_pauseparam,
10401 .get_rx_csum = tg3_get_rx_csum,
10402 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010403 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010404 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010405 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -070010406 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010407 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -070010408 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010409 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -070010410 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -070010411 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -070010412 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010413};
10414
10415static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10416{
Michael Chan1b277772006-03-20 22:27:48 -080010417 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010418
10419 tp->nvram_size = EEPROM_CHIP_SIZE;
10420
Michael Chan18201802006-03-20 22:29:15 -080010421 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010422 return;
10423
Michael Chanb16250e2006-09-27 16:10:14 -070010424 if ((magic != TG3_EEPROM_MAGIC) &&
10425 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10426 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010427 return;
10428
10429 /*
10430 * Size the chip by reading offsets at increasing powers of two.
10431 * When we encounter our validation signature, we know the addressing
10432 * has wrapped around, and thus have our chip size.
10433 */
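/*
 * Illustrative example: on a part that is really 0x4000 bytes long,
 * reads at 0x10, 0x20, ... return ordinary data, but the read at
 * cursize == 0x4000 wraps back to offset 0 and returns the magic
 * word, so nvram_size ends up as 0x4000.
 */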
Michael Chan1b277772006-03-20 22:27:48 -080010434 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010435
10436 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -080010437 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010438 return;
10439
Michael Chan18201802006-03-20 22:29:15 -080010440 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010441 break;
10442
10443 cursize <<= 1;
10444 }
10445
10446 tp->nvram_size = cursize;
10447}
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010448
Linus Torvalds1da177e2005-04-16 15:20:36 -070010449static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10450{
10451 u32 val;
10452
Michael Chan18201802006-03-20 22:29:15 -080010453 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -080010454 return;
10455
10456 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -080010457 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010458 tg3_get_eeprom_size(tp);
10459 return;
10460 }
10461
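/* Otherwise the word at offset 0xf0 encodes the NVRAM size in KB in
 * its upper 16 bits; if that word cannot be read or reads as zero,
 * fall back to the 512 KB default below.
 */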
Linus Torvalds1da177e2005-04-16 15:20:36 -070010462 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10463 if (val != 0) {
10464 tp->nvram_size = (val >> 16) * 1024;
10465 return;
10466 }
10467 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010468 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010469}
10470
10471static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10472{
10473 u32 nvcfg1;
10474
10475 nvcfg1 = tr32(NVRAM_CFG1);
10476 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10477 tp->tg3_flags2 |= TG3_FLG2_FLASH;
 10478 } else {
10480 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10481 tw32(NVRAM_CFG1, nvcfg1);
10482 }
10483
Michael Chan4c987482005-09-05 17:52:38 -070010484 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -070010485 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010486 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10487 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10488 tp->nvram_jedecnum = JEDEC_ATMEL;
10489 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10490 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10491 break;
10492 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10493 tp->nvram_jedecnum = JEDEC_ATMEL;
10494 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10495 break;
10496 case FLASH_VENDOR_ATMEL_EEPROM:
10497 tp->nvram_jedecnum = JEDEC_ATMEL;
10498 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10499 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10500 break;
10501 case FLASH_VENDOR_ST:
10502 tp->nvram_jedecnum = JEDEC_ST;
10503 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10504 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10505 break;
10506 case FLASH_VENDOR_SAIFUN:
10507 tp->nvram_jedecnum = JEDEC_SAIFUN;
10508 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10509 break;
10510 case FLASH_VENDOR_SST_SMALL:
10511 case FLASH_VENDOR_SST_LARGE:
10512 tp->nvram_jedecnum = JEDEC_SST;
10513 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10514 break;
10515 }
 10516 } else {
10518 tp->nvram_jedecnum = JEDEC_ATMEL;
10519 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10520 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10521 }
10522}
10523
Michael Chan361b4ac2005-04-21 17:11:21 -070010524static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10525{
10526 u32 nvcfg1;
10527
10528 nvcfg1 = tr32(NVRAM_CFG1);
10529
Michael Chane6af3012005-04-21 17:12:05 -070010530 /* NVRAM protection for TPM */
10531 if (nvcfg1 & (1 << 27))
10532 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10533
Michael Chan361b4ac2005-04-21 17:11:21 -070010534 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10535 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10536 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10537 tp->nvram_jedecnum = JEDEC_ATMEL;
10538 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10539 break;
10540 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10541 tp->nvram_jedecnum = JEDEC_ATMEL;
10542 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10543 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10544 break;
10545 case FLASH_5752VENDOR_ST_M45PE10:
10546 case FLASH_5752VENDOR_ST_M45PE20:
10547 case FLASH_5752VENDOR_ST_M45PE40:
10548 tp->nvram_jedecnum = JEDEC_ST;
10549 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10550 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10551 break;
10552 }
10553
10554 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10555 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10556 case FLASH_5752PAGE_SIZE_256:
10557 tp->nvram_pagesize = 256;
10558 break;
10559 case FLASH_5752PAGE_SIZE_512:
10560 tp->nvram_pagesize = 512;
10561 break;
10562 case FLASH_5752PAGE_SIZE_1K:
10563 tp->nvram_pagesize = 1024;
10564 break;
10565 case FLASH_5752PAGE_SIZE_2K:
10566 tp->nvram_pagesize = 2048;
10567 break;
10568 case FLASH_5752PAGE_SIZE_4K:
10569 tp->nvram_pagesize = 4096;
10570 break;
10571 case FLASH_5752PAGE_SIZE_264:
10572 tp->nvram_pagesize = 264;
10573 break;
10574 }
 10575 } else {
10577 /* For eeprom, set pagesize to maximum eeprom size */
10578 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10579
10580 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10581 tw32(NVRAM_CFG1, nvcfg1);
10582 }
10583}
10584
Michael Chand3c7b882006-03-23 01:28:25 -080010585static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10586{
Matt Carlson989a9d22007-05-05 11:51:05 -070010587 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010588
10589 nvcfg1 = tr32(NVRAM_CFG1);
10590
10591 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010592 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010593 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010594 protect = 1;
10595 }
Michael Chand3c7b882006-03-23 01:28:25 -080010596
Matt Carlson989a9d22007-05-05 11:51:05 -070010597 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10598 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010599 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10600 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10601 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010602 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010603 tp->nvram_jedecnum = JEDEC_ATMEL;
10604 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10605 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10606 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010607 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10608 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010609 tp->nvram_size = (protect ? 0x3e200 :
10610 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010611 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010612 tp->nvram_size = (protect ? 0x1f200 :
10613 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010614 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010615 tp->nvram_size = (protect ? 0x1f200 :
10616 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010617 break;
10618 case FLASH_5752VENDOR_ST_M45PE10:
10619 case FLASH_5752VENDOR_ST_M45PE20:
10620 case FLASH_5752VENDOR_ST_M45PE40:
10621 tp->nvram_jedecnum = JEDEC_ST;
10622 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10623 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10624 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010625 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010626 tp->nvram_size = (protect ?
10627 TG3_NVRAM_SIZE_64KB :
10628 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010629 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010630 tp->nvram_size = (protect ?
10631 TG3_NVRAM_SIZE_64KB :
10632 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010633 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010634 tp->nvram_size = (protect ?
10635 TG3_NVRAM_SIZE_128KB :
10636 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010637 break;
10638 }
10639}
10640
Michael Chan1b277772006-03-20 22:27:48 -080010641static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10642{
10643 u32 nvcfg1;
10644
10645 nvcfg1 = tr32(NVRAM_CFG1);
10646
10647 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10648 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10649 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10650 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10651 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10652 tp->nvram_jedecnum = JEDEC_ATMEL;
10653 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10654 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10655
10656 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10657 tw32(NVRAM_CFG1, nvcfg1);
10658 break;
10659 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10660 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10661 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10662 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10663 tp->nvram_jedecnum = JEDEC_ATMEL;
10664 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10665 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10666 tp->nvram_pagesize = 264;
10667 break;
10668 case FLASH_5752VENDOR_ST_M45PE10:
10669 case FLASH_5752VENDOR_ST_M45PE20:
10670 case FLASH_5752VENDOR_ST_M45PE40:
10671 tp->nvram_jedecnum = JEDEC_ST;
10672 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10673 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10674 tp->nvram_pagesize = 256;
10675 break;
10676 }
10677}
10678
Matt Carlson6b91fa02007-10-10 18:01:09 -070010679static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10680{
10681 u32 nvcfg1, protect = 0;
10682
10683 nvcfg1 = tr32(NVRAM_CFG1);
10684
10685 /* NVRAM protection for TPM */
10686 if (nvcfg1 & (1 << 27)) {
10687 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10688 protect = 1;
10689 }
10690
10691 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10692 switch (nvcfg1) {
10693 case FLASH_5761VENDOR_ATMEL_ADB021D:
10694 case FLASH_5761VENDOR_ATMEL_ADB041D:
10695 case FLASH_5761VENDOR_ATMEL_ADB081D:
10696 case FLASH_5761VENDOR_ATMEL_ADB161D:
10697 case FLASH_5761VENDOR_ATMEL_MDB021D:
10698 case FLASH_5761VENDOR_ATMEL_MDB041D:
10699 case FLASH_5761VENDOR_ATMEL_MDB081D:
10700 case FLASH_5761VENDOR_ATMEL_MDB161D:
10701 tp->nvram_jedecnum = JEDEC_ATMEL;
10702 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10703 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10704 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10705 tp->nvram_pagesize = 256;
10706 break;
10707 case FLASH_5761VENDOR_ST_A_M45PE20:
10708 case FLASH_5761VENDOR_ST_A_M45PE40:
10709 case FLASH_5761VENDOR_ST_A_M45PE80:
10710 case FLASH_5761VENDOR_ST_A_M45PE16:
10711 case FLASH_5761VENDOR_ST_M_M45PE20:
10712 case FLASH_5761VENDOR_ST_M_M45PE40:
10713 case FLASH_5761VENDOR_ST_M_M45PE80:
10714 case FLASH_5761VENDOR_ST_M_M45PE16:
10715 tp->nvram_jedecnum = JEDEC_ST;
10716 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10717 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10718 tp->nvram_pagesize = 256;
10719 break;
10720 }
10721
10722 if (protect) {
10723 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10724 } else {
10725 switch (nvcfg1) {
10726 case FLASH_5761VENDOR_ATMEL_ADB161D:
10727 case FLASH_5761VENDOR_ATMEL_MDB161D:
10728 case FLASH_5761VENDOR_ST_A_M45PE16:
10729 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010730 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010731 break;
10732 case FLASH_5761VENDOR_ATMEL_ADB081D:
10733 case FLASH_5761VENDOR_ATMEL_MDB081D:
10734 case FLASH_5761VENDOR_ST_A_M45PE80:
10735 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010736 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010737 break;
10738 case FLASH_5761VENDOR_ATMEL_ADB041D:
10739 case FLASH_5761VENDOR_ATMEL_MDB041D:
10740 case FLASH_5761VENDOR_ST_A_M45PE40:
10741 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010742 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010743 break;
10744 case FLASH_5761VENDOR_ATMEL_ADB021D:
10745 case FLASH_5761VENDOR_ATMEL_MDB021D:
10746 case FLASH_5761VENDOR_ST_A_M45PE20:
10747 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010748 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010749 break;
10750 }
10751 }
10752}
10753
Michael Chanb5d37722006-09-27 16:06:21 -070010754static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10755{
10756 tp->nvram_jedecnum = JEDEC_ATMEL;
10757 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10758 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10759}
10760
Linus Torvalds1da177e2005-04-16 15:20:36 -070010761/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10762static void __devinit tg3_nvram_init(struct tg3 *tp)
10763{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010764 tw32_f(GRC_EEPROM_ADDR,
10765 (EEPROM_ADDR_FSM_RESET |
10766 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10767 EEPROM_ADDR_CLKPERD_SHIFT)));
10768
Michael Chan9d57f012006-12-07 00:23:25 -080010769 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010770
10771 /* Enable seeprom accesses. */
10772 tw32_f(GRC_LOCAL_CTRL,
10773 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10774 udelay(100);
10775
10776 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10777 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10778 tp->tg3_flags |= TG3_FLAG_NVRAM;
10779
Michael Chanec41c7d2006-01-17 02:40:55 -080010780 if (tg3_nvram_lock(tp)) {
 10781 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10782 "tg3_nvram_init failed.\n", tp->dev->name);
10783 return;
10784 }
Michael Chane6af3012005-04-21 17:12:05 -070010785 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010786
Matt Carlson989a9d22007-05-05 11:51:05 -070010787 tp->nvram_size = 0;
10788
Michael Chan361b4ac2005-04-21 17:11:21 -070010789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10790 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010791 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10792 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010793 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson57e69832008-05-25 23:48:31 -070010794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Michael Chan1b277772006-03-20 22:27:48 -080010796 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010797 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10798 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010799 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10800 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010801 else
10802 tg3_get_nvram_info(tp);
10803
Matt Carlson989a9d22007-05-05 11:51:05 -070010804 if (tp->nvram_size == 0)
10805 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010806
Michael Chane6af3012005-04-21 17:12:05 -070010807 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010808 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010809
10810 } else {
10811 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10812
10813 tg3_get_eeprom_size(tp);
10814 }
10815}
10816
10817static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10818 u32 offset, u32 *val)
10819{
10820 u32 tmp;
10821 int i;
10822
10823 if (offset > EEPROM_ADDR_ADDR_MASK ||
10824 (offset % 4) != 0)
10825 return -EINVAL;
10826
10827 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10828 EEPROM_ADDR_DEVID_MASK |
10829 EEPROM_ADDR_READ);
10830 tw32(GRC_EEPROM_ADDR,
10831 tmp |
10832 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10833 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10834 EEPROM_ADDR_ADDR_MASK) |
10835 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10836
Michael Chan9d57f012006-12-07 00:23:25 -080010837 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010838 tmp = tr32(GRC_EEPROM_ADDR);
10839
10840 if (tmp & EEPROM_ADDR_COMPLETE)
10841 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010842 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010843 }
10844 if (!(tmp & EEPROM_ADDR_COMPLETE))
10845 return -EBUSY;
10846
10847 *val = tr32(GRC_EEPROM_DATA);
10848 return 0;
10849}
10850
10851#define NVRAM_CMD_TIMEOUT 10000
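/* tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE up to NVRAM_CMD_TIMEOUT
 * times with a 10 usec delay per iteration, i.e. it allows a command
 * roughly 100 ms to complete before returning -EBUSY.
 */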
10852
10853static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10854{
10855 int i;
10856
10857 tw32(NVRAM_CMD, nvram_cmd);
10858 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10859 udelay(10);
10860 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10861 udelay(10);
10862 break;
10863 }
10864 }
10865 if (i == NVRAM_CMD_TIMEOUT) {
10866 return -EBUSY;
10867 }
10868 return 0;
10869}
10870
Michael Chan18201802006-03-20 22:29:15 -080010871static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10872{
10873 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10874 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10875 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010876 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010877 (tp->nvram_jedecnum == JEDEC_ATMEL))
10878
10879 addr = ((addr / tp->nvram_pagesize) <<
10880 ATMEL_AT45DB0X1B_PAGE_POS) +
10881 (addr % tp->nvram_pagesize);
10882
10883 return addr;
10884}
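/*
 * Example translation (assuming the usual 264-byte Atmel AT45DB page
 * layout, with ATMEL_AT45DB0X1B_PAGE_POS == 9): linear offset 1000 is
 * page 1000 / 264 = 3, byte 1000 % 264 = 208, so the physical address
 * becomes (3 << 9) + 208 = 0x6d0.  tg3_nvram_logical_addr() below
 * performs the inverse mapping.
 */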
10885
Michael Chanc4e65752006-03-20 22:29:32 -080010886static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10887{
10888 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10889 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10890 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010891 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010892 (tp->nvram_jedecnum == JEDEC_ATMEL))
10893
10894 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10895 tp->nvram_pagesize) +
10896 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10897
10898 return addr;
10899}
10900
Linus Torvalds1da177e2005-04-16 15:20:36 -070010901static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10902{
10903 int ret;
10904
Linus Torvalds1da177e2005-04-16 15:20:36 -070010905 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10906 return tg3_nvram_read_using_eeprom(tp, offset, val);
10907
Michael Chan18201802006-03-20 22:29:15 -080010908 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010909
10910 if (offset > NVRAM_ADDR_MSK)
10911 return -EINVAL;
10912
Michael Chanec41c7d2006-01-17 02:40:55 -080010913 ret = tg3_nvram_lock(tp);
10914 if (ret)
10915 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010916
Michael Chane6af3012005-04-21 17:12:05 -070010917 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010918
10919 tw32(NVRAM_ADDR, offset);
10920 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10921 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10922
10923 if (ret == 0)
10924 *val = swab32(tr32(NVRAM_RDDATA));
10925
Michael Chane6af3012005-04-21 17:12:05 -070010926 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010927
Michael Chan381291b2005-12-13 21:08:21 -080010928 tg3_nvram_unlock(tp);
10929
Linus Torvalds1da177e2005-04-16 15:20:36 -070010930 return ret;
10931}
10932
Al Virob9fc7dc2007-12-17 22:59:57 -080010933static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10934{
10935 u32 v;
10936 int res = tg3_nvram_read(tp, offset, &v);
10937 if (!res)
10938 *val = cpu_to_le32(v);
10939 return res;
10940}
10941
Michael Chan18201802006-03-20 22:29:15 -080010942static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10943{
10944 int err;
10945 u32 tmp;
10946
10947 err = tg3_nvram_read(tp, offset, &tmp);
10948 *val = swab32(tmp);
10949 return err;
10950}
10951
Linus Torvalds1da177e2005-04-16 15:20:36 -070010952static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10953 u32 offset, u32 len, u8 *buf)
10954{
10955 int i, j, rc = 0;
10956 u32 val;
10957
10958 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010959 u32 addr;
10960 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010961
10962 addr = offset + i;
10963
10964 memcpy(&data, buf + i, 4);
10965
Al Virob9fc7dc2007-12-17 22:59:57 -080010966 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010967
10968 val = tr32(GRC_EEPROM_ADDR);
10969 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10970
10971 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10972 EEPROM_ADDR_READ);
10973 tw32(GRC_EEPROM_ADDR, val |
10974 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10975 (addr & EEPROM_ADDR_ADDR_MASK) |
10976 EEPROM_ADDR_START |
10977 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010978
Michael Chan9d57f012006-12-07 00:23:25 -080010979 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010980 val = tr32(GRC_EEPROM_ADDR);
10981
10982 if (val & EEPROM_ADDR_COMPLETE)
10983 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010984 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010985 }
10986 if (!(val & EEPROM_ADDR_COMPLETE)) {
10987 rc = -EBUSY;
10988 break;
10989 }
10990 }
10991
10992 return rc;
10993}
10994
10995/* offset and length are dword aligned */
10996static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10997 u8 *buf)
10998{
10999 int ret = 0;
11000 u32 pagesize = tp->nvram_pagesize;
11001 u32 pagemask = pagesize - 1;
11002 u32 nvram_cmd;
11003 u8 *tmp;
11004
11005 tmp = kmalloc(pagesize, GFP_KERNEL);
11006 if (tmp == NULL)
11007 return -ENOMEM;
11008
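/* Unbuffered flash is programmed a full page at a time: each pass
 * reads the page containing 'offset' into 'tmp', merges the caller's
 * data into it, erases the page and then rewrites it word by word.
 */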
11009 while (len) {
11010 int j;
Michael Chane6af3012005-04-21 17:12:05 -070011011 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011012
11013 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011014
Linus Torvalds1da177e2005-04-16 15:20:36 -070011015 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080011016 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080011017 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011018 break;
11019 }
11020 if (ret)
11021 break;
11022
11023 page_off = offset & pagemask;
11024 size = pagesize;
11025 if (len < size)
11026 size = len;
11027
11028 len -= size;
11029
11030 memcpy(tmp + page_off, buf, size);
11031
11032 offset = offset + (pagesize - page_off);
11033
Michael Chane6af3012005-04-21 17:12:05 -070011034 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011035
11036 /*
11037 * Before we can erase the flash page, we need
11038 * to issue a special "write enable" command.
11039 */
11040 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11041
11042 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11043 break;
11044
11045 /* Erase the target page */
11046 tw32(NVRAM_ADDR, phy_addr);
11047
11048 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11049 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11050
11051 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11052 break;
11053
11054 /* Issue another write enable to start the write. */
11055 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11056
11057 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11058 break;
11059
11060 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011061 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011062
Al Virob9fc7dc2007-12-17 22:59:57 -080011063 data = *((__be32 *) (tmp + j));
11064 /* swab32(le32_to_cpu(data)), actually */
11065 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011066
11067 tw32(NVRAM_ADDR, phy_addr + j);
11068
11069 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11070 NVRAM_CMD_WR;
11071
11072 if (j == 0)
11073 nvram_cmd |= NVRAM_CMD_FIRST;
11074 else if (j == (pagesize - 4))
11075 nvram_cmd |= NVRAM_CMD_LAST;
11076
11077 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11078 break;
11079 }
11080 if (ret)
11081 break;
11082 }
11083
11084 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11085 tg3_nvram_exec_cmd(tp, nvram_cmd);
11086
11087 kfree(tmp);
11088
11089 return ret;
11090}
11091
11092/* offset and length are dword aligned */
11093static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11094 u8 *buf)
11095{
11096 int i, ret = 0;
11097
11098 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011099 u32 page_off, phy_addr, nvram_cmd;
11100 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011101
11102 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080011103 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070011104
11105 page_off = offset % tp->nvram_pagesize;
11106
Michael Chan18201802006-03-20 22:29:15 -080011107 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011108
11109 tw32(NVRAM_ADDR, phy_addr);
11110
11111 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11112
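/* NVRAM_CMD_FIRST is added below for the first word of the transfer
 * and at each page boundary; NVRAM_CMD_LAST for the final word of a
 * page and for the final word of the whole transfer.
 */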
11113 if ((page_off == 0) || (i == 0))
11114 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070011115 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011116 nvram_cmd |= NVRAM_CMD_LAST;
11117
11118 if (i == (len - 4))
11119 nvram_cmd |= NVRAM_CMD_LAST;
11120
Michael Chan4c987482005-09-05 17:52:38 -070011121 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011122 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080011123 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011124 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011125 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Matt Carlson57e69832008-05-25 23:48:31 -070011126 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
Michael Chan4c987482005-09-05 17:52:38 -070011127 (tp->nvram_jedecnum == JEDEC_ST) &&
11128 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011129
11130 if ((ret = tg3_nvram_exec_cmd(tp,
11131 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11132 NVRAM_CMD_DONE)))
11133
11134 break;
11135 }
11136 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11137 /* We always do complete word writes to eeprom. */
11138 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11139 }
11140
11141 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11142 break;
11143 }
11144 return ret;
11145}
11146
11147/* offset and length are dword aligned */
11148static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11149{
11150 int ret;
11151
Linus Torvalds1da177e2005-04-16 15:20:36 -070011152 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011153 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11154 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011155 udelay(40);
11156 }
11157
11158 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11159 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
 11160 } else {
11162 u32 grc_mode;
11163
Michael Chanec41c7d2006-01-17 02:40:55 -080011164 ret = tg3_nvram_lock(tp);
11165 if (ret)
11166 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011167
Michael Chane6af3012005-04-21 17:12:05 -070011168 tg3_enable_nvram_access(tp);
11169 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11170 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011171 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011172
11173 grc_mode = tr32(GRC_MODE);
11174 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11175
11176 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11177 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11178
11179 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11180 buf);
 11181 } else {
11183 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11184 buf);
11185 }
11186
11187 grc_mode = tr32(GRC_MODE);
11188 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11189
Michael Chane6af3012005-04-21 17:12:05 -070011190 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011191 tg3_nvram_unlock(tp);
11192 }
11193
11194 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070011195 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011196 udelay(40);
11197 }
11198
11199 return ret;
11200}
11201
11202struct subsys_tbl_ent {
11203 u16 subsys_vendor, subsys_devid;
11204 u32 phy_id;
11205};
11206
11207static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11208 /* Broadcom boards. */
11209 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11210 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11211 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11212 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11213 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11214 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11215 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11216 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11217 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11218 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11219 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11220
11221 /* 3com boards. */
11222 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11223 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11224 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11225 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11226 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11227
11228 /* DELL boards. */
11229 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11230 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11231 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11232 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11233
11234 /* Compaq boards. */
11235 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11236 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11237 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11238 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11239 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11240
11241 /* IBM boards. */
11242 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11243};
11244
11245static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11246{
11247 int i;
11248
11249 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11250 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11251 tp->pdev->subsystem_vendor) &&
11252 (subsys_id_to_phy_id[i].subsys_devid ==
11253 tp->pdev->subsystem_device))
11254 return &subsys_id_to_phy_id[i];
11255 }
11256 return NULL;
11257}
11258
Michael Chan7d0c41e2005-04-21 17:06:20 -070011259static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011260{
Linus Torvalds1da177e2005-04-16 15:20:36 -070011261 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080011262 u16 pmcsr;
11263
11264 /* On some early chips the SRAM cannot be accessed in D3hot state,
 11265 * so we need to make sure we're in D0.
11266 */
11267 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11268 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11269 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11270 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070011271
11272 /* Make sure register accesses (indirect or otherwise)
11273 * will function correctly.
11274 */
11275 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11276 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011277
David S. Millerf49639e2006-06-09 11:58:36 -070011278 /* The memory arbiter has to be enabled in order for SRAM accesses
11279 * to succeed. Normally on powerup the tg3 chip firmware will make
11280 * sure it is enabled, but other entities such as system netboot
11281 * code might disable it.
11282 */
11283 val = tr32(MEMARB_MODE);
11284 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11285
Linus Torvalds1da177e2005-04-16 15:20:36 -070011286 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011287 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11288
Gary Zambranoa85feb82007-05-05 11:52:19 -070011289 /* Assume an onboard, WOL-capable device by default. */
11290 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080011291
Michael Chanb5d37722006-09-27 16:06:21 -070011292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080011293 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070011294 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011295 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11296 }
Matt Carlson0527ba32007-10-10 18:03:30 -070011297 val = tr32(VCPU_CFGSHDW);
11298 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070011299 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070011300 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011301 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11302 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011303 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Michael Chanb5d37722006-09-27 16:06:21 -070011304 return;
11305 }
11306
Linus Torvalds1da177e2005-04-16 15:20:36 -070011307 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11308 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11309 u32 nic_cfg, led_cfg;
Matt Carlsona9daf362008-05-25 23:49:44 -070011310 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
Michael Chan7d0c41e2005-04-21 17:06:20 -070011311 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011312
11313 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11314 tp->nic_sram_data_cfg = nic_cfg;
11315
11316 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11317 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11318 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11319 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11320 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11321 (ver > 0) && (ver < 0x100))
11322 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11323
Matt Carlsona9daf362008-05-25 23:49:44 -070011324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11325 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11326
Linus Torvalds1da177e2005-04-16 15:20:36 -070011327 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11328 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11329 eeprom_phy_serdes = 1;
11330
11331 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11332 if (nic_phy_id != 0) {
11333 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11334 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11335
11336 eeprom_phy_id = (id1 >> 16) << 10;
11337 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11338 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11339 } else
11340 eeprom_phy_id = 0;
11341
Michael Chan7d0c41e2005-04-21 17:06:20 -070011342 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070011343 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070011344 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070011345 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11346 else
11347 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11348 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011349
John W. Linvillecbf46852005-04-21 17:01:29 -070011350 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011351 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11352 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070011353 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070011354 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11355
11356 switch (led_cfg) {
11357 default:
11358 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11359 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11360 break;
11361
11362 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11363 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11364 break;
11365
11366 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11367 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070011368
11369 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11370			 * read, as with some older 5700/5701 bootcode.
11371 */
11372 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11373 ASIC_REV_5700 ||
11374 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11375 ASIC_REV_5701)
11376 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11377
Linus Torvalds1da177e2005-04-16 15:20:36 -070011378 break;
11379
11380 case SHASTA_EXT_LED_SHARED:
11381 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11382 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11383 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11384 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11385 LED_CTRL_MODE_PHY_2);
11386 break;
11387
11388 case SHASTA_EXT_LED_MAC:
11389 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11390 break;
11391
11392 case SHASTA_EXT_LED_COMBO:
11393 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11394 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11395 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11396 LED_CTRL_MODE_PHY_2);
11397 break;
11398
Stephen Hemminger855e1112008-04-16 16:37:28 -070011399 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011400
11401 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11403 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11404 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11405
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011406 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11407 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080011408
Michael Chan9d26e212006-12-07 00:21:14 -080011409 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011410 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011411 if ((tp->pdev->subsystem_vendor ==
11412 PCI_VENDOR_ID_ARIMA) &&
11413 (tp->pdev->subsystem_device == 0x205a ||
11414 tp->pdev->subsystem_device == 0x2063))
11415 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11416 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070011417 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080011418 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11419 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011420
11421 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11422 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070011423 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011424 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11425 }
Matt Carlson0d3031d2007-10-10 18:02:43 -070011426 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
11427 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Gary Zambranoa85feb82007-05-05 11:52:19 -070011428 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11429 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11430 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011431
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070011432 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11433 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
11434 device_may_wakeup(&tp->pdev->dev))
Matt Carlson0527ba32007-10-10 18:03:30 -070011435 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11436
Linus Torvalds1da177e2005-04-16 15:20:36 -070011437 if (cfg2 & (1 << 17))
11438 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11439
11440		/* serdes signal pre-emphasis in register 0x590 is set by
11441		 * the bootcode if bit 18 is set. */
11442 if (cfg2 & (1 << 18))
11443 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070011444
11445 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11446 u32 cfg3;
11447
11448 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11449 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11450 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11451 }
Matt Carlsona9daf362008-05-25 23:49:44 -070011452
11453 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11454 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11455 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11456 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11457 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11458 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011459 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070011460}
11461
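/* Start a single OTP controller command and poll for up to 1 ms for it
 * to complete.  Returns 0 on success, -EBUSY on timeout.
 */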
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011462static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11463{
11464 int i;
11465 u32 val;
11466
11467 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11468 tw32(OTP_CTRL, cmd);
11469
11470 /* Wait for up to 1 ms for command to execute. */
11471 for (i = 0; i < 100; i++) {
11472 val = tr32(OTP_STATUS);
11473 if (val & OTP_STATUS_CMD_DONE)
11474 break;
11475 udelay(10);
11476 }
11477
11478 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11479}
11480
11481/* Read the gphy configuration from the OTP region of the chip. The gphy
11482 * configuration is a 32-bit value that straddles the alignment boundary.
11483 * We do two 32-bit reads and then shift and merge the results.
11484 */
11485static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11486{
11487 u32 bhalf_otp, thalf_otp;
11488
11489 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11490
11491 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11492 return 0;
11493
11494 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11495
11496 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11497 return 0;
11498
11499 thalf_otp = tr32(OTP_READ_DATA);
11500
11501 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11502
11503 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11504 return 0;
11505
11506 bhalf_otp = tr32(OTP_READ_DATA);
11507
11508 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11509}
11510
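/* Determine which PHY is attached.  The ID is read over MII unless
 * ASF/APE firmware owns the PHY; failing that, the value taken from
 * the EEPROM or from the subsystem-ID table is used.  The initial
 * advertisement is also programmed here for copper PHYs.
 */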
Michael Chan7d0c41e2005-04-21 17:06:20 -070011511static int __devinit tg3_phy_probe(struct tg3 *tp)
11512{
11513 u32 hw_phy_id_1, hw_phy_id_2;
11514 u32 hw_phy_id, hw_phy_id_masked;
11515 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011516
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070011517 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11518 return tg3_phy_init(tp);
11519
Linus Torvalds1da177e2005-04-16 15:20:36 -070011520 /* Reading the PHY ID register can conflict with ASF
11521	 * firmware access to the PHY hardware.
11522 */
11523 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070011524 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11525 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011526 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11527 } else {
11528 /* Now read the physical PHY_ID from the chip and verify
11529 * that it is sane. If it doesn't look good, we fall back
11530		 * to the hard-coded, table-based PHY_ID, and failing
11531		 * that, to the value found in the eeprom area.
11532 */
11533 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11534 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11535
11536 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11537 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11538 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11539
11540 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11541 }
11542
11543 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11544 tp->phy_id = hw_phy_id;
11545 if (hw_phy_id_masked == PHY_ID_BCM8002)
11546 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011547 else
11548 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011549 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011550 if (tp->phy_id != PHY_ID_INVALID) {
11551 /* Do nothing, phy ID already set up in
11552 * tg3_get_eeprom_hw_cfg().
11553 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011554 } else {
11555 struct subsys_tbl_ent *p;
11556
11557 /* No eeprom signature? Try the hardcoded
11558 * subsys device table.
11559 */
11560 p = lookup_by_subsys(tp);
11561 if (!p)
11562 return -ENODEV;
11563
11564 tp->phy_id = p->phy_id;
11565 if (!tp->phy_id ||
11566 tp->phy_id == PHY_ID_BCM8002)
11567 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11568 }
11569 }
11570
Michael Chan747e8f82005-07-25 12:33:22 -070011571 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011572 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011573 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011574 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011575
11576 tg3_readphy(tp, MII_BMSR, &bmsr);
11577 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11578 (bmsr & BMSR_LSTATUS))
11579 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011580
Linus Torvalds1da177e2005-04-16 15:20:36 -070011581 err = tg3_phy_reset(tp);
11582 if (err)
11583 return err;
11584
11585 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11586 ADVERTISE_100HALF | ADVERTISE_100FULL |
11587 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11588 tg3_ctrl = 0;
11589 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11590 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11591 MII_TG3_CTRL_ADV_1000_FULL);
11592 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11593 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11594 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11595 MII_TG3_CTRL_ENABLE_AS_MASTER);
11596 }
11597
Michael Chan3600d912006-12-07 00:21:48 -080011598 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11599 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11600 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11601 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011602 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11603
11604 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11605 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11606
11607 tg3_writephy(tp, MII_BMCR,
11608 BMCR_ANENABLE | BMCR_ANRESTART);
11609 }
11610 tg3_phy_set_wirespeed(tp);
11611
11612 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11613 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11614 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11615 }
11616
11617skip_phy_reset:
11618 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11619 err = tg3_init_5401phy_dsp(tp);
11620 if (err)
11621 return err;
11622 }
11623
11624 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11625 err = tg3_init_5401phy_dsp(tp);
11626 }
11627
Michael Chan747e8f82005-07-25 12:33:22 -070011628 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011629 tp->link_config.advertising =
11630 (ADVERTISED_1000baseT_Half |
11631 ADVERTISED_1000baseT_Full |
11632 ADVERTISED_Autoneg |
11633 ADVERTISED_FIBRE);
11634 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11635 tp->link_config.advertising &=
11636 ~(ADVERTISED_1000baseT_Half |
11637 ADVERTISED_1000baseT_Full);
11638
11639 return err;
11640}
11641
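/* Extract the board part number from the VPD data, read either from
 * NVRAM or through the PCI VPD capability, and store it in
 * tp->board_part_number.
 */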
11642static void __devinit tg3_read_partno(struct tg3 *tp)
11643{
11644 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011645 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011646 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011647
Michael Chan18201802006-03-20 22:29:15 -080011648 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011649 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011650
Michael Chan18201802006-03-20 22:29:15 -080011651 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011652 for (i = 0; i < 256; i += 4) {
11653 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011654
Michael Chan1b277772006-03-20 22:27:48 -080011655 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11656 goto out_not_found;
11657
11658 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11659 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11660 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11661 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11662 }
11663 } else {
11664 int vpd_cap;
11665
11666 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11667 for (i = 0; i < 256; i += 4) {
11668 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011669 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011670 u16 tmp16;
11671
11672 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11673 i);
11674 while (j++ < 100) {
11675 pci_read_config_word(tp->pdev, vpd_cap +
11676 PCI_VPD_ADDR, &tmp16);
11677 if (tmp16 & 0x8000)
11678 break;
11679 msleep(1);
11680 }
David S. Millerf49639e2006-06-09 11:58:36 -070011681 if (!(tmp16 & 0x8000))
11682 goto out_not_found;
11683
Michael Chan1b277772006-03-20 22:27:48 -080011684 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11685 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011686 v = cpu_to_le32(tmp);
11687 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011688 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011689 }
11690
11691 /* Now parse and find the part number. */
Michael Chanaf2c6a42006-11-07 14:57:51 -080011692 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011693 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011694 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011695
11696 if (val == 0x82 || val == 0x91) {
11697 i = (i + 3 +
11698 (vpd_data[i + 1] +
11699 (vpd_data[i + 2] << 8)));
11700 continue;
11701 }
11702
11703 if (val != 0x90)
11704 goto out_not_found;
11705
11706 block_end = (i + 3 +
11707 (vpd_data[i + 1] +
11708 (vpd_data[i + 2] << 8)));
11709 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011710
11711 if (block_end > 256)
11712 goto out_not_found;
11713
11714 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011715 if (vpd_data[i + 0] == 'P' &&
11716 vpd_data[i + 1] == 'N') {
11717 int partno_len = vpd_data[i + 2];
11718
Michael Chanaf2c6a42006-11-07 14:57:51 -080011719 i += 3;
11720 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011721 goto out_not_found;
11722
11723 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011724 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011725
11726 /* Success. */
11727 return;
11728 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011729 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011730 }
11731
11732 /* Part number not found. */
11733 goto out_not_found;
11734 }
11735
11736out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11738 strcpy(tp->board_part_number, "BCM95906");
11739 else
11740 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011741}
11742
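/* Returns 1 if the NVRAM words at @offset look like a valid firmware
 * image header, 0 otherwise.
 */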
Matt Carlson9c8a6202007-10-21 16:16:08 -070011743static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11744{
11745 u32 val;
11746
11747 if (tg3_nvram_read_swab(tp, offset, &val) ||
11748 (val & 0xfc000000) != 0x0c000000 ||
11749 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11750 val != 0)
11751 return 0;
11752
11753 return 1;
11754}
11755
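/* Build the firmware version string in tp->fw_ver from the bootcode
 * image in NVRAM, appending the ASF firmware version when ASF is
 * enabled and the APE is not in use.
 */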
Michael Chanc4e65752006-03-20 22:29:32 -080011756static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11757{
11758 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011759 u32 ver_offset;
11760 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011761
11762 if (tg3_nvram_read_swab(tp, 0, &val))
11763 return;
11764
11765 if (val != TG3_EEPROM_MAGIC)
11766 return;
11767
11768 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11769 tg3_nvram_read_swab(tp, 0x4, &start))
11770 return;
11771
11772 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011773
11774 if (!tg3_fw_img_is_valid(tp, offset) ||
11775 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011776 return;
11777
Matt Carlson9c8a6202007-10-21 16:16:08 -070011778 offset = offset + ver_offset - start;
11779 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011780 __le32 v;
11781 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011782 return;
11783
Al Virob9fc7dc2007-12-17 22:59:57 -080011784 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011785 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011786
11787 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011788 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011789 return;
11790
11791 for (offset = TG3_NVM_DIR_START;
11792 offset < TG3_NVM_DIR_END;
11793 offset += TG3_NVM_DIRENT_SIZE) {
11794 if (tg3_nvram_read_swab(tp, offset, &val))
11795 return;
11796
11797 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11798 break;
11799 }
11800
11801 if (offset == TG3_NVM_DIR_END)
11802 return;
11803
11804 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11805 start = 0x08000000;
11806 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11807 return;
11808
11809 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11810 !tg3_fw_img_is_valid(tp, offset) ||
11811 tg3_nvram_read_swab(tp, offset + 8, &val))
11812 return;
11813
11814 offset += val - start;
11815
11816 bcnt = strlen(tp->fw_ver);
11817
11818 tp->fw_ver[bcnt++] = ',';
11819 tp->fw_ver[bcnt++] = ' ';
11820
11821 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011822 __le32 v;
11823 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011824 return;
11825
Al Virob9fc7dc2007-12-17 22:59:57 -080011826 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011827
Al Virob9fc7dc2007-12-17 22:59:57 -080011828 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11829 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011830 break;
11831 }
11832
Al Virob9fc7dc2007-12-17 22:59:57 -080011833 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11834 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011835 }
11836
11837 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011838}
11839
Michael Chan7544b092007-05-05 13:08:32 -070011840static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11841
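/* One-time probe of the chip revision, bus configuration and hardware
 * erratum workarounds.  Selects the register/mailbox access methods
 * and fills in most of the tg3_flags* bits used by the rest of the
 * driver.
 */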
Linus Torvalds1da177e2005-04-16 15:20:36 -070011842static int __devinit tg3_get_invariants(struct tg3 *tp)
11843{
11844 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011845 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11846 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011847 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11848 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011849 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11850 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011851 { },
11852 };
11853 u32 misc_ctrl_reg;
11854 u32 cacheline_sz_reg;
11855 u32 pci_state_reg, grc_misc_cfg;
11856 u32 val;
11857 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011858 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011859
Linus Torvalds1da177e2005-04-16 15:20:36 -070011860 /* Force memory write invalidate off. If we leave it on,
11861 * then on 5700_BX chips we have to enable a workaround.
11862 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11863	 * to match the cacheline size. The Broadcom driver has this
11864	 * workaround but turns MWI off all the time, so it never uses
11865	 * it. This seems to suggest that the workaround is insufficient.
11866 */
11867 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11868 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11869 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11870
11871 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11872 * has the register indirect write enable bit set before
11873 * we try to access any of the MMIO registers. It is also
11874 * critical that the PCI-X hw workaround situation is decided
11875 * before that as well.
11876 */
11877 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11878 &misc_ctrl_reg);
11879
11880 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11881 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11883 u32 prod_id_asic_rev;
11884
11885 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11886 &prod_id_asic_rev);
11887 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11888 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011889
Michael Chanff645be2005-04-21 17:09:53 -070011890 /* Wrong chip ID in 5752 A0. This code can be removed later
11891 * as A0 is not in production.
11892 */
11893 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11894 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11895
Michael Chan68929142005-08-09 20:17:14 -070011896 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11897 * we need to disable memory and use config. cycles
11898 * only to access all registers. The 5702/03 chips
11899 * can mistakenly decode the special cycles from the
11900 * ICH chipsets as memory write cycles, causing corruption
11901 * of register and memory space. Only certain ICH bridges
11902 * will drive special cycles with non-zero data during the
11903 * address phase which can fall within the 5703's address
11904 * range. This is not an ICH bug as the PCI spec allows
11905 * non-zero address during special cycles. However, only
11906 * these ICH bridges are known to drive non-zero addresses
11907 * during special cycles.
11908 *
11909 * Since special cycles do not cross PCI bridges, we only
11910 * enable this workaround if the 5703 is on the secondary
11911 * bus of these ICH bridges.
11912 */
11913 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11914 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11915 static struct tg3_dev_id {
11916 u32 vendor;
11917 u32 device;
11918 u32 rev;
11919 } ich_chipsets[] = {
11920 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11921 PCI_ANY_ID },
11922 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11923 PCI_ANY_ID },
11924 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11925 0xa },
11926 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11927 PCI_ANY_ID },
11928 { },
11929 };
11930 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11931 struct pci_dev *bridge = NULL;
11932
11933 while (pci_id->vendor != 0) {
11934 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11935 bridge);
11936 if (!bridge) {
11937 pci_id++;
11938 continue;
11939 }
11940 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070011941 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070011942 continue;
11943 }
11944 if (bridge->subordinate &&
11945 (bridge->subordinate->number ==
11946 tp->pdev->bus->number)) {
11947
11948 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11949 pci_dev_put(bridge);
11950 break;
11951 }
11952 }
11953 }
11954
Matt Carlson41588ba2008-04-19 18:12:33 -070011955 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11956 static struct tg3_dev_id {
11957 u32 vendor;
11958 u32 device;
11959 } bridge_chipsets[] = {
11960 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11961 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11962 { },
11963 };
11964 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11965 struct pci_dev *bridge = NULL;
11966
11967 while (pci_id->vendor != 0) {
11968 bridge = pci_get_device(pci_id->vendor,
11969 pci_id->device,
11970 bridge);
11971 if (!bridge) {
11972 pci_id++;
11973 continue;
11974 }
11975 if (bridge->subordinate &&
11976 (bridge->subordinate->number <=
11977 tp->pdev->bus->number) &&
11978 (bridge->subordinate->subordinate >=
11979 tp->pdev->bus->number)) {
11980 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11981 pci_dev_put(bridge);
11982 break;
11983 }
11984 }
11985 }
11986
Michael Chan4a29cc22006-03-19 13:21:12 -080011987 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11988 * DMA addresses > 40-bit. This bridge may have other additional
11989 * 57xx devices behind it in some 4-port NIC designs for example.
11990 * Any tg3 device found behind the bridge will also need the 40-bit
11991 * DMA workaround.
11992 */
Michael Chana4e2b342005-10-26 15:46:52 -070011993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11995 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080011996 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070011997 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070011998 }
Michael Chan4a29cc22006-03-19 13:21:12 -080011999 else {
12000 struct pci_dev *bridge = NULL;
12001
12002 do {
12003 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12004 PCI_DEVICE_ID_SERVERWORKS_EPB,
12005 bridge);
12006 if (bridge && bridge->subordinate &&
12007 (bridge->subordinate->number <=
12008 tp->pdev->bus->number) &&
12009 (bridge->subordinate->subordinate >=
12010 tp->pdev->bus->number)) {
12011 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12012 pci_dev_put(bridge);
12013 break;
12014 }
12015 } while (bridge);
12016 }
Michael Chan4cf78e42005-07-25 12:29:19 -070012017
Linus Torvalds1da177e2005-04-16 15:20:36 -070012018 /* Initialize misc host control in PCI block. */
12019 tp->misc_host_ctrl |= (misc_ctrl_reg &
12020 MISC_HOST_CTRL_CHIPREV);
12021 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12022 tp->misc_host_ctrl);
12023
12024 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12025 &cacheline_sz_reg);
12026
12027 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12028 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12029 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12030 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12031
Michael Chan7544b092007-05-05 13:08:32 -070012032 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12033 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12034 tp->pdev_peer = tg3_find_peer(tp);
12035
John W. Linville2052da92005-04-21 16:56:08 -070012036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070012037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080012038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080012039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070012044 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070012045 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12046
John W. Linville1b440c562005-04-21 17:03:18 -070012047 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12048 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12049 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12050
Michael Chan5a6f3072006-03-20 22:28:05 -080012051 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070012052 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12053 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12054 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12055 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12056 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12057 tp->pdev_peer == tp->pdev))
12058 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12059
Michael Chanaf36e6b2006-03-23 01:28:06 -080012060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012065 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080012066 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080012067 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070012068 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012069 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012070 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12071 ASIC_REV_5750 &&
12072 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080012073 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070012074 }
Michael Chan5a6f3072006-03-20 22:28:05 -080012075 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012076
Matt Carlsonf51f3562008-05-25 23:45:08 -070012077 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12078 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012079 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12080
Michael Chanc7835a72006-11-15 21:14:42 -080012081 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12082 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012083 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080012084
12085 pcie_set_readrq(tp->pdev, 4096);
12086
Michael Chanc7835a72006-11-15 21:14:42 -080012087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12088 u16 lnkctl;
12089
12090 pci_read_config_word(tp->pdev,
12091 pcie_cap + PCI_EXP_LNKCTL,
12092 &lnkctl);
12093 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12094 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12095 }
12096 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012097
Michael Chan399de502005-10-03 14:02:39 -070012098 /* If we have an AMD 762 or VIA K8T800 chipset, write
12099 * reordering to the mailbox registers done by the host
12100 * controller can cause major troubles. We read back from
12101 * every mailbox register write to force the writes to be
12102 * posted to the chip in order.
12103 */
12104 if (pci_dev_present(write_reorder_chipsets) &&
12105 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12106 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12107
Linus Torvalds1da177e2005-04-16 15:20:36 -070012108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12109 tp->pci_lat_timer < 64) {
12110 tp->pci_lat_timer = 64;
12111
12112 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12113 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12114 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12115 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12116
12117 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12118 cacheline_sz_reg);
12119 }
12120
Matt Carlson9974a352007-10-07 23:27:28 -070012121 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12122 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12123 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12124 if (!tp->pcix_cap) {
12125 printk(KERN_ERR PFX "Cannot find PCI-X "
12126 "capability, aborting.\n");
12127 return -EIO;
12128 }
12129 }
12130
Linus Torvalds1da177e2005-04-16 15:20:36 -070012131 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12132 &pci_state_reg);
12133
Matt Carlson9974a352007-10-07 23:27:28 -070012134 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012135 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12136
12137 /* If this is a 5700 BX chipset, and we are in PCI-X
12138 * mode, enable register write workaround.
12139 *
12140 * The workaround is to use indirect register accesses
12141 * for all chip writes not to mailbox registers.
12142 */
12143 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12144 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012145
12146 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12147
12148		/* The chip can have its power management PCI config
12149 * space registers clobbered due to this bug.
12150 * So explicitly force the chip into D0 here.
12151 */
Matt Carlson9974a352007-10-07 23:27:28 -070012152 pci_read_config_dword(tp->pdev,
12153 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012154 &pm_reg);
12155 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12156 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070012157 pci_write_config_dword(tp->pdev,
12158 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012159 pm_reg);
12160
12161 /* Also, force SERR#/PERR# in PCI command. */
12162 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12163 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12164 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12165 }
12166 }
12167
Michael Chan087fe252005-08-09 20:17:41 -070012168 /* 5700 BX chips need to have their TX producer index mailboxes
12169 * written twice to workaround a bug.
12170 */
12171 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12172 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12173
Linus Torvalds1da177e2005-04-16 15:20:36 -070012174 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12175 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12176 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12177 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12178
12179 /* Chip-specific fixup from Broadcom driver */
12180 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12181 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12182 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12183 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12184 }
12185
Michael Chan1ee582d2005-08-09 20:16:46 -070012186 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070012187 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012188 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070012189 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070012190 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070012191 tp->write32_tx_mbox = tg3_write32;
12192 tp->write32_rx_mbox = tg3_write32;
12193
12194 /* Various workaround register access methods */
12195 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12196 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012197 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12198 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12199 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12200 /*
12201 * Back to back register writes can cause problems on these
12202 * chips, the workaround is to read back all reg writes
12203 * except those to mailbox regs.
12204 *
12205 * See tg3_write_indirect_reg32().
12206 */
Michael Chan1ee582d2005-08-09 20:16:46 -070012207 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070012208 }
12209
Michael Chan1ee582d2005-08-09 20:16:46 -070012210
12211 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12212 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12213 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12214 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12215 tp->write32_rx_mbox = tg3_write_flush_reg32;
12216 }
Michael Chan20094932005-08-09 20:16:32 -070012217
Michael Chan68929142005-08-09 20:17:14 -070012218 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12219 tp->read32 = tg3_read_indirect_reg32;
12220 tp->write32 = tg3_write_indirect_reg32;
12221 tp->read32_mbox = tg3_read_indirect_mbox;
12222 tp->write32_mbox = tg3_write_indirect_mbox;
12223 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12224 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12225
12226 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012227 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012228
12229 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12230 pci_cmd &= ~PCI_COMMAND_MEMORY;
12231 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12232 }
Michael Chanb5d37722006-09-27 16:06:21 -070012233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12234 tp->read32_mbox = tg3_read32_mbox_5906;
12235 tp->write32_mbox = tg3_write32_mbox_5906;
12236 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12237 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12238 }
Michael Chan68929142005-08-09 20:17:14 -070012239
Michael Chanbbadf502006-04-06 21:46:34 -070012240 if (tp->write32 == tg3_write_indirect_reg32 ||
12241 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12242 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070012243 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070012244 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12245
Michael Chan7d0c41e2005-04-21 17:06:20 -070012246 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080012247 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070012248 * determined before calling tg3_set_power_state() so that
12249 * we know whether or not to switch out of Vaux power.
12250 * When the flag is set, it means that GPIO1 is used for eeprom
12251 * write protect and also implies that it is a LOM where GPIOs
12252 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012253 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070012254 tg3_get_eeprom_hw_cfg(tp);
12255
Matt Carlson0d3031d2007-10-10 18:02:43 -070012256 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12257 /* Allow reads and writes to the
12258 * APE register and memory space.
12259 */
12260 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12261 PCISTATE_ALLOW_APE_SHMEM_WR;
12262 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12263 pci_state_reg);
12264 }
12265
Matt Carlson9936bcf2007-10-10 18:03:07 -070012266 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
Matt Carlsond30cdd22007-10-07 23:28:35 -070012269 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12270
Matt Carlsonb5af7122007-11-12 21:22:02 -080012271 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12272 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12273 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12274 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12275 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12276 }
12277
Michael Chan314fba32005-04-21 17:07:04 -070012278 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12279 * GPIO1 driven high will bring 5700's external PHY out of reset.
12280 * It is also used as eeprom write protect on LOMs.
12281 */
12282 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12283 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12284 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12285 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12286 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070012287 /* Unused GPIO3 must be driven as output on 5752 because there
12288 * are no pull-up resistors on unused GPIO pins.
12289 */
12290 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12291 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070012292
Michael Chanaf36e6b2006-03-23 01:28:06 -080012293 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12294 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12295
Matt Carlson5f0c4a32008-06-09 15:41:12 -070012296 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12297 /* Turn off the debug UART. */
12298 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12299 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12300 /* Keep VMain power. */
12301 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12302 GRC_LCLCTRL_GPIO_OUTPUT0;
12303 }
12304
Linus Torvalds1da177e2005-04-16 15:20:36 -070012305 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080012306 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012307 if (err) {
12308 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12309 pci_name(tp->pdev));
12310 return err;
12311 }
12312
12313 /* 5700 B0 chips do not support checksumming correctly due
12314 * to hardware bugs.
12315 */
12316 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12317 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12318
Linus Torvalds1da177e2005-04-16 15:20:36 -070012319 /* Derive initial jumbo mode from MTU assigned in
12320 * ether_setup() via the alloc_etherdev() call
12321 */
Michael Chan0f893dc2005-07-25 12:30:38 -070012322 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070012323 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070012324 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012325
12326 /* Determine WakeOnLan speed to use. */
12327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12328 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12329 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12330 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12331 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12332 } else {
12333 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12334 }
12335
12336 /* A few boards don't want Ethernet@WireSpeed phy feature */
12337 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12338 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12339 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070012340 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012341 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070012342 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070012343 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12344
12345 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12346 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12347 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12348 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12349 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12350
Michael Chanc424cb22006-04-29 18:56:34 -070012351 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012353 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012354 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080012356 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12357 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12358 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080012359 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12360 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
Matt Carlson57e69832008-05-25 23:48:31 -070012361 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12362 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
Michael Chanc424cb22006-04-29 18:56:34 -070012363 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012365
Matt Carlsonb2a5c192008-04-03 21:44:44 -070012366 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12367 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12368 tp->phy_otp = tg3_read_otp_phycfg(tp);
12369 if (tp->phy_otp == 0)
12370 tp->phy_otp = TG3_OTP_DEFAULT;
12371 }
12372
Matt Carlsonf51f3562008-05-25 23:45:08 -070012373 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
Matt Carlson8ef21422008-05-02 16:47:53 -070012374 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12375 else
12376 tp->mi_mode = MAC_MI_MODE_BASE;
12377
Linus Torvalds1da177e2005-04-16 15:20:36 -070012378 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012379 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12380 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12381 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12382
Matt Carlson57e69832008-05-25 23:48:31 -070012383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12384 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12385
Matt Carlson158d7ab2008-05-29 01:37:54 -070012386 err = tg3_mdio_init(tp);
12387 if (err)
12388 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012389
12390 /* Initialize data/descriptor byte/word swapping. */
12391 val = tr32(GRC_MODE);
12392 val &= GRC_MODE_HOST_STACKUP;
12393 tw32(GRC_MODE, val | tp->grc_mode);
12394
12395 tg3_switch_clocks(tp);
12396
12397 /* Clear this out for sanity. */
12398 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12399
12400 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12401 &pci_state_reg);
12402 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12403 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12404 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12405
12406 if (chiprevid == CHIPREV_ID_5701_A0 ||
12407 chiprevid == CHIPREV_ID_5701_B0 ||
12408 chiprevid == CHIPREV_ID_5701_B2 ||
12409 chiprevid == CHIPREV_ID_5701_B5) {
12410 void __iomem *sram_base;
12411
12412 /* Write some dummy words into the SRAM status block
12413 * area, see if it reads back correctly. If the return
12414 * value is bad, force enable the PCIX workaround.
12415 */
12416 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12417
12418 writel(0x00000000, sram_base);
12419 writel(0x00000000, sram_base + 4);
12420 writel(0xffffffff, sram_base + 4);
12421 if (readl(sram_base) != 0x00000000)
12422 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12423 }
12424 }
12425
12426 udelay(50);
12427 tg3_nvram_init(tp);
12428
12429 grc_misc_cfg = tr32(GRC_MISC_CFG);
12430 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12431
Linus Torvalds1da177e2005-04-16 15:20:36 -070012432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12433 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12434 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12435 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12436
David S. Millerfac9b832005-05-18 22:46:34 -070012437 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12438 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12439 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12440 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12441 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12442 HOSTCC_MODE_CLRTICK_TXBD);
12443
12444 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12445 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12446 tp->misc_host_ctrl);
12447 }
12448
Matt Carlson3bda1252008-08-15 14:08:22 -070012449 /* Preserve the APE MAC_MODE bits */
12450 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12451 tp->mac_mode = tr32(MAC_MODE) |
12452 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12453 else
12454 tp->mac_mode = TG3_DEF_MAC_MODE;
12455
Linus Torvalds1da177e2005-04-16 15:20:36 -070012456 /* these are limited to 10/100 only */
12457 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12458 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12459 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12460 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12461 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12462 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12463 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12464 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12465 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080012466 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12467 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070012468 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012469 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12470
12471 err = tg3_phy_probe(tp);
12472 if (err) {
12473 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12474 pci_name(tp->pdev), err);
12475 /* ... but do not return immediately ... */
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070012476 tg3_mdio_fini(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012477 }
12478
12479 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080012480 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012481
12482 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12483 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12484 } else {
12485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12486 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12487 else
12488 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12489 }
12490
12491 /* 5700 {AX,BX} chips have a broken status block link
12492 * change bit implementation, so we must use the
12493 * status register in those cases.
12494 */
12495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12496 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12497 else
12498 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12499
12500 /* The led_ctrl is set during tg3_phy_probe, here we might
12501 * have to force the link status polling mechanism based
12502 * upon subsystem IDs.
12503 */
12504 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070012505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070012506 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12507 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12508 TG3_FLAG_USE_LINKCHG_REG);
12509 }
12510
12511 /* For all SERDES we poll the MAC status register. */
12512 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12513 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12514 else
12515 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12516
Michael Chan5a6f3072006-03-20 22:28:05 -080012517 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070012518 * straddle the 4GB address boundary in some cases.
12519 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080012520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070012522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070012523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Matt Carlson57e69832008-05-25 23:48:31 -070012524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
Michael Chanb5d37722006-09-27 16:06:21 -070012525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080012526 tp->dev->hard_start_xmit = tg3_start_xmit;
12527 else
12528 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012529
12530 tp->rx_offset = 2;
12531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12532 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12533 tp->rx_offset = 0;
12534
Michael Chanf92905d2006-06-29 20:14:29 -070012535 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12536
12537 /* Increment the rx prod index on the rx std ring by at most
12538 * 8 for these chips to workaround hw errata.
12539 */
12540	 * 8 for these chips to work around hw errata.
12541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12543 tp->rx_std_max_post = 8;
12544
Matt Carlson8ed5d972007-05-07 00:25:49 -070012545 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12546 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12547 PCIE_PWR_MGMT_L1_THRESH_MSK;
12548
Linus Torvalds1da177e2005-04-16 15:20:36 -070012549 return err;
12550}
12551
David S. Miller49b6e95f2007-03-29 01:38:42 -070012552#ifdef CONFIG_SPARC
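/* On SPARC, take the MAC address from the OpenFirmware
 * "local-mac-address" property when it is present.
 */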
Linus Torvalds1da177e2005-04-16 15:20:36 -070012553static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12554{
12555 struct net_device *dev = tp->dev;
12556 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012557 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012558 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012559 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012560
David S. Miller49b6e95f2007-03-29 01:38:42 -070012561 addr = of_get_property(dp, "local-mac-address", &len);
12562 if (addr && len == 6) {
12563 memcpy(dev->dev_addr, addr, 6);
12564 memcpy(dev->perm_addr, dev->dev_addr, 6);
12565 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012566 }
12567 return -ENODEV;
12568}
12569
12570static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12571{
12572 struct net_device *dev = tp->dev;
12573
12574 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012575 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012576 return 0;
12577}
12578#endif
12579
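/* Determine the device's MAC address, trying (in order) OpenFirmware
 * properties on SPARC, the SRAM mailbox, NVRAM and finally the MAC
 * address registers.
 */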
12580static int __devinit tg3_get_device_address(struct tg3 *tp)
12581{
12582 struct net_device *dev = tp->dev;
12583 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012584 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012585
David S. Miller49b6e95f2007-03-29 01:38:42 -070012586#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012587 if (!tg3_get_macaddr_sparc(tp))
12588 return 0;
12589#endif
12590
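	/* mac_offset selects where in NVRAM the MAC address lives: 0x7c by
	 * default, 0xcc when DUAL_MAC_CTRL flags this port as the second
	 * MAC of a dual-MAC (5704/5780-class) device, and 0x10 on the 5906.
	 */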
12591 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012592 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012593 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012594 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12595 mac_offset = 0xcc;
12596 if (tg3_nvram_lock(tp))
12597 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12598 else
12599 tg3_nvram_unlock(tp);
12600 }
Michael Chanb5d37722006-09-27 16:06:21 -070012601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12602 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012603
12604 /* First try to get it from MAC address mailbox. */
12605 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
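	/* 0x484b is ASCII "HK"; the bootcode appears to use it as a
	 * signature in the upper 16 bits to mark the mailbox contents as a
	 * valid MAC address.
	 */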
12606 if ((hi >> 16) == 0x484b) {
12607 dev->dev_addr[0] = (hi >> 8) & 0xff;
12608 dev->dev_addr[1] = (hi >> 0) & 0xff;
12609
12610 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12611 dev->dev_addr[2] = (lo >> 24) & 0xff;
12612 dev->dev_addr[3] = (lo >> 16) & 0xff;
12613 dev->dev_addr[4] = (lo >> 8) & 0xff;
12614 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012615
Michael Chan008652b2006-03-27 23:14:53 -080012616 /* Some old bootcode may report a 0 MAC address in SRAM */
12617 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12618 }
12619 if (!addr_ok) {
12620 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012621 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012622 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12623 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12624 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12625 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12626 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12627 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12628 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12629 }
12630 /* Finally just fetch it out of the MAC control regs. */
12631 else {
12632 hi = tr32(MAC_ADDR_0_HIGH);
12633 lo = tr32(MAC_ADDR_0_LOW);
12634
12635 dev->dev_addr[5] = lo & 0xff;
12636 dev->dev_addr[4] = (lo >> 8) & 0xff;
12637 dev->dev_addr[3] = (lo >> 16) & 0xff;
12638 dev->dev_addr[2] = (lo >> 24) & 0xff;
12639 dev->dev_addr[1] = hi & 0xff;
12640 dev->dev_addr[0] = (hi >> 8) & 0xff;
12641 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012642 }
12643
12644 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012645#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012646 if (!tg3_get_default_macaddr_sparc(tp))
12647 return 0;
12648#endif
12649 return -EINVAL;
12650 }
John W. Linville2ff43692005-09-12 14:44:20 -070012651 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012652 return 0;
12653}
12654
David S. Miller59e6b432005-05-18 22:50:10 -070012655#define BOUNDARY_SINGLE_CACHELINE 1
12656#define BOUNDARY_MULTI_CACHELINE 2
12657
12658static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12659{
12660 int cacheline_size;
12661 u8 byte;
12662 int goal;
12663
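	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so multiply by 4
	 * to get bytes.  A value of 0 typically means the register was never
	 * programmed; treat that as a large 1024-byte line.
	 */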
12664 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12665 if (byte == 0)
12666 cacheline_size = 1024;
12667 else
12668 cacheline_size = (int) byte * 4;
12669
12670 /* On 5703 and later chips, the boundary bits have no
12671 * effect.
12672 */
12673 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12674 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12675 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12676 goto out;
12677
12678#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12679 goal = BOUNDARY_MULTI_CACHELINE;
12680#else
12681#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12682 goal = BOUNDARY_SINGLE_CACHELINE;
12683#else
12684 goal = 0;
12685#endif
12686#endif
12687
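	/* goal == 0 means no platform-specific preference, so leave the
	 * chip's default DMA boundary setting untouched.
	 */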
12688 if (!goal)
12689 goto out;
12690
12691 /* PCI controllers on most RISC systems tend to disconnect
12692 * when a device tries to burst across a cache-line boundary.
12693 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12694 *
12695 * Unfortunately, for PCI-E there are only limited
12696 * write-side controls for this, and thus for reads
12697 * we will still get the disconnects. We'll also waste
12698 * these PCI cycles for both read and write for chips
 12699	 * other than 5700 and 5701, which do not implement the
12700 * boundary bits.
12701 */
12702 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12703 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12704 switch (cacheline_size) {
12705 case 16:
12706 case 32:
12707 case 64:
12708 case 128:
12709 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12710 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12711 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12712 } else {
12713 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12714 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12715 }
12716 break;
12717
12718 case 256:
12719 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12720 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12721 break;
12722
12723 default:
12724 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12725 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12726 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012727 }
David S. Miller59e6b432005-05-18 22:50:10 -070012728 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12729 switch (cacheline_size) {
12730 case 16:
12731 case 32:
12732 case 64:
12733 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12734 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12735 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12736 break;
12737 }
12738 /* fallthrough */
12739 case 128:
12740 default:
12741 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12742 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12743 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012744 }
David S. Miller59e6b432005-05-18 22:50:10 -070012745 } else {
12746 switch (cacheline_size) {
12747 case 16:
12748 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12749 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12750 DMA_RWCTRL_WRITE_BNDRY_16);
12751 break;
12752 }
12753 /* fallthrough */
12754 case 32:
12755 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12756 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12757 DMA_RWCTRL_WRITE_BNDRY_32);
12758 break;
12759 }
12760 /* fallthrough */
12761 case 64:
12762 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12763 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12764 DMA_RWCTRL_WRITE_BNDRY_64);
12765 break;
12766 }
12767 /* fallthrough */
12768 case 128:
12769 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12770 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12771 DMA_RWCTRL_WRITE_BNDRY_128);
12772 break;
12773 }
12774 /* fallthrough */
12775 case 256:
12776 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12777 DMA_RWCTRL_WRITE_BNDRY_256);
12778 break;
12779 case 512:
12780 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12781 DMA_RWCTRL_WRITE_BNDRY_512);
12782 break;
12783 case 1024:
12784 default:
12785 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12786 DMA_RWCTRL_WRITE_BNDRY_1024);
12787 break;
Stephen Hemminger855e1112008-04-16 16:37:28 -070012788 }
David S. Miller59e6b432005-05-18 22:50:10 -070012789 }
12790
12791out:
12792 return val;
12793}
12794
Linus Torvalds1da177e2005-04-16 15:20:36 -070012795static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12796{
12797 struct tg3_internal_buffer_desc test_desc;
12798 u32 sram_dma_descs;
12799 int i, ret;
12800
12801 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12802
12803 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12804 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12805 tw32(RDMAC_STATUS, 0);
12806 tw32(WDMAC_STATUS, 0);
12807
12808 tw32(BUFMGR_MODE, 0);
12809 tw32(FTQ_RESET, 0);
12810
12811 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12812 test_desc.addr_lo = buf_dma & 0xffffffff;
12813 test_desc.nic_mbuf = 0x00002100;
12814 test_desc.len = size;
12815
12816 /*
 12817	 * HP ZX1 systems saw test failures with 5701 cards running at 33MHz
 12818	 * the *second* time the tg3 driver was loaded after an
 12819	 * initial scan.
12820 *
12821 * Broadcom tells me:
12822 * ...the DMA engine is connected to the GRC block and a DMA
12823 * reset may affect the GRC block in some unpredictable way...
12824 * The behavior of resets to individual blocks has not been tested.
12825 *
12826 * Broadcom noted the GRC reset will also reset all sub-components.
12827 */
12828 if (to_device) {
12829 test_desc.cqid_sqid = (13 << 8) | 2;
12830
12831 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12832 udelay(40);
12833 } else {
12834 test_desc.cqid_sqid = (16 << 8) | 7;
12835
12836 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12837 udelay(40);
12838 }
12839 test_desc.flags = 0x00000005;
12840
12841 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12842 u32 val;
12843
12844 val = *(((u32 *)&test_desc) + i);
12845 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12846 sram_dma_descs + (i * sizeof(u32)));
12847 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12848 }
12849 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12850
12851 if (to_device) {
12852 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12853 } else {
12854 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12855 }
12856
12857 ret = -ENODEV;
12858 for (i = 0; i < 40; i++) {
12859 u32 val;
12860
12861 if (to_device)
12862 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12863 else
12864 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12865 if ((val & 0xffff) == sram_dma_descs) {
12866 ret = 0;
12867 break;
12868 }
12869
12870 udelay(100);
12871 }
12872
12873 return ret;
12874}
12875
David S. Millerded73402005-05-23 13:59:47 -070012876#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012877
12878static int __devinit tg3_test_dma(struct tg3 *tp)
12879{
12880 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012881 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012882 int ret;
12883
12884 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12885 if (!buf) {
12886 ret = -ENOMEM;
12887 goto out_nofree;
12888 }
12889
12890 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12891 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12892
David S. Miller59e6b432005-05-18 22:50:10 -070012893 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012894
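	/* The hex constants below program chip- and bus-specific DMA
	 * read/write watermarks and related control bits in the DMA
	 * read/write control register.
	 */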
12895 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12896 /* DMA read watermark not used on PCIE */
12897 tp->dma_rwctrl |= 0x00180000;
12898 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012901 tp->dma_rwctrl |= 0x003f0000;
12902 else
12903 tp->dma_rwctrl |= 0x003f000f;
12904 } else {
12905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12907 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080012908 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012909
Michael Chan4a29cc22006-03-19 13:21:12 -080012910 /* If the 5704 is behind the EPB bridge, we can
12911 * do the less restrictive ONE_DMA workaround for
12912 * better performance.
12913 */
12914 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12915 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12916 tp->dma_rwctrl |= 0x8000;
12917 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012918 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12919
Michael Chan49afdeb2007-02-13 12:17:03 -080012920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12921 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070012922 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080012923 tp->dma_rwctrl |=
12924 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12925 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12926 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070012927 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12928 /* 5780 always in PCIX mode */
12929 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070012930 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12931 /* 5714 always in PCIX mode */
12932 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012933 } else {
12934 tp->dma_rwctrl |= 0x001b000f;
12935 }
12936 }
12937
12938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12940 tp->dma_rwctrl &= 0xfffffff0;
12941
12942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12944 /* Remove this if it causes problems for some boards. */
12945 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12946
12947 /* On 5700/5701 chips, we need to set this bit.
12948 * Otherwise the chip will issue cacheline transactions
 12949	 * to streamable DMA memory without all of the byte
12950 * enables turned on. This is an error on several
12951 * RISC PCI controllers, in particular sparc64.
12952 *
12953 * On 5703/5704 chips, this bit has been reassigned
12954 * a different meaning. In particular, it is used
12955 * on those chips to enable a PCI-X workaround.
12956 */
12957 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12958 }
12959
12960 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12961
12962#if 0
12963 /* Unneeded, already done by tg3_get_invariants. */
12964 tg3_switch_clocks(tp);
12965#endif
12966
12967 ret = 0;
12968 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12969 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12970 goto out;
12971
David S. Miller59e6b432005-05-18 22:50:10 -070012972 /* It is best to perform DMA test with maximum write burst size
12973 * to expose the 5700/5701 write DMA bug.
12974 */
12975 saved_dma_rwctrl = tp->dma_rwctrl;
12976 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12977 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12978
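	/* Fill the buffer with a known pattern, DMA it to the chip and back,
	 * then verify it.  If corruption is seen, retry with the write
	 * boundary forced to 16 bytes; if it still fails, give up.
	 */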
Linus Torvalds1da177e2005-04-16 15:20:36 -070012979 while (1) {
12980 u32 *p = buf, i;
12981
12982 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12983 p[i] = i;
12984
12985 /* Send the buffer to the chip. */
12986 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12987 if (ret) {
 12988 			printk(KERN_ERR "tg3_test_dma() Write of the buffer failed, err = %d\n", ret);
12989 break;
12990 }
12991
12992#if 0
12993 /* validate data reached card RAM correctly. */
12994 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12995 u32 val;
12996 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12997 if (le32_to_cpu(val) != p[i]) {
 12998 				printk(KERN_ERR "tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", le32_to_cpu(val), i);
12999 /* ret = -ENODEV here? */
13000 }
13001 p[i] = 0;
13002 }
13003#endif
13004 /* Now read it back. */
13005 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13006 if (ret) {
 13007 			printk(KERN_ERR "tg3_test_dma() Read of the buffer failed, err = %d\n", ret);
13008
13009 break;
13010 }
13011
13012 /* Verify it. */
13013 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13014 if (p[i] == i)
13015 continue;
13016
David S. Miller59e6b432005-05-18 22:50:10 -070013017 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13018 DMA_RWCTRL_WRITE_BNDRY_16) {
13019 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013020 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13021 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13022 break;
13023 } else {
13024 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13025 ret = -ENODEV;
13026 goto out;
13027 }
13028 }
13029
13030 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13031 /* Success. */
13032 ret = 0;
13033 break;
13034 }
13035 }
David S. Miller59e6b432005-05-18 22:50:10 -070013036 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13037 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070013038 static struct pci_device_id dma_wait_state_chipsets[] = {
13039 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13040 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13041 { },
13042 };
13043
David S. Miller59e6b432005-05-18 22:50:10 -070013044		/* DMA test passed without adjusting DMA boundary;
Michael Chan6d1cfba2005-06-08 14:13:14 -070013045 * now look for chipsets that are known to expose the
13046 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070013047 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070013048 if (pci_dev_present(dma_wait_state_chipsets)) {
13049 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13050 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13051 }
13052 else
13053 /* Safe to use the calculated DMA boundary. */
13054 tp->dma_rwctrl = saved_dma_rwctrl;
13055
David S. Miller59e6b432005-05-18 22:50:10 -070013056 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13057 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013058
13059out:
13060 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13061out_nofree:
13062 return ret;
13063}
13064
13065static void __devinit tg3_init_link_config(struct tg3 *tp)
13066{
13067 tp->link_config.advertising =
13068 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13069 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13070 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13071 ADVERTISED_Autoneg | ADVERTISED_MII);
13072 tp->link_config.speed = SPEED_INVALID;
13073 tp->link_config.duplex = DUPLEX_INVALID;
13074 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013075 tp->link_config.active_speed = SPEED_INVALID;
13076 tp->link_config.active_duplex = DUPLEX_INVALID;
13077 tp->link_config.phy_is_low_power = 0;
13078 tp->link_config.orig_speed = SPEED_INVALID;
13079 tp->link_config.orig_duplex = DUPLEX_INVALID;
13080 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13081}
13082
13083static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13084{
Michael Chanfdfec1722005-07-25 12:31:48 -070013085 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13086 tp->bufmgr_config.mbuf_read_dma_low_water =
13087 DEFAULT_MB_RDMA_LOW_WATER_5705;
13088 tp->bufmgr_config.mbuf_mac_rx_low_water =
13089 DEFAULT_MB_MACRX_LOW_WATER_5705;
13090 tp->bufmgr_config.mbuf_high_water =
13091 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070013092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13093 tp->bufmgr_config.mbuf_mac_rx_low_water =
13094 DEFAULT_MB_MACRX_LOW_WATER_5906;
13095 tp->bufmgr_config.mbuf_high_water =
13096 DEFAULT_MB_HIGH_WATER_5906;
13097 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013098
Michael Chanfdfec1722005-07-25 12:31:48 -070013099 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13100 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13101 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13102 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13103 tp->bufmgr_config.mbuf_high_water_jumbo =
13104 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13105 } else {
13106 tp->bufmgr_config.mbuf_read_dma_low_water =
13107 DEFAULT_MB_RDMA_LOW_WATER;
13108 tp->bufmgr_config.mbuf_mac_rx_low_water =
13109 DEFAULT_MB_MACRX_LOW_WATER;
13110 tp->bufmgr_config.mbuf_high_water =
13111 DEFAULT_MB_HIGH_WATER;
13112
13113 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13114 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13115 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13116 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13117 tp->bufmgr_config.mbuf_high_water_jumbo =
13118 DEFAULT_MB_HIGH_WATER_JUMBO;
13119 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013120
13121 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13122 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13123}
13124
13125static char * __devinit tg3_phy_string(struct tg3 *tp)
13126{
13127 switch (tp->phy_id & PHY_ID_MASK) {
13128 case PHY_ID_BCM5400: return "5400";
13129 case PHY_ID_BCM5401: return "5401";
13130 case PHY_ID_BCM5411: return "5411";
13131 case PHY_ID_BCM5701: return "5701";
13132 case PHY_ID_BCM5703: return "5703";
13133 case PHY_ID_BCM5704: return "5704";
13134 case PHY_ID_BCM5705: return "5705";
13135 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070013136 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070013137 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070013138 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080013139 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080013140 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070013141 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070013142 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070013143 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070013144 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070013145 case PHY_ID_BCM8002: return "8002/serdes";
13146 case 0: return "serdes";
13147 default: return "unknown";
Stephen Hemminger855e1112008-04-16 16:37:28 -070013148 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013149}
13150
Michael Chanf9804dd2005-09-27 12:13:10 -070013151static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13152{
13153 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13154 strcpy(str, "PCI Express");
13155 return str;
13156 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13157 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13158
13159 strcpy(str, "PCIX:");
13160
13161 if ((clock_ctrl == 7) ||
13162 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13163 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13164 strcat(str, "133MHz");
13165 else if (clock_ctrl == 0)
13166 strcat(str, "33MHz");
13167 else if (clock_ctrl == 2)
13168 strcat(str, "50MHz");
13169 else if (clock_ctrl == 4)
13170 strcat(str, "66MHz");
13171 else if (clock_ctrl == 6)
13172 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070013173 } else {
13174 strcpy(str, "PCI:");
13175 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13176 strcat(str, "66MHz");
13177 else
13178 strcat(str, "33MHz");
13179 }
13180 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13181 strcat(str, ":32-bit");
13182 else
13183 strcat(str, ":64-bit");
13184 return str;
13185}
13186
Michael Chan8c2dc7e2005-12-19 16:26:02 -080013187static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070013188{
13189 struct pci_dev *peer;
13190 unsigned int func, devnr = tp->pdev->devfn & ~7;
13191
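	/* On dual-MAC devices such as the 5704 the two NICs are separate
	 * PCI functions of the same device, so scan the other functions of
	 * this devfn to find the peer.
	 */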
13192 for (func = 0; func < 8; func++) {
13193 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13194 if (peer && peer != tp->pdev)
13195 break;
13196 pci_dev_put(peer);
13197 }
Michael Chan16fe9d72005-12-13 21:09:54 -080013198	/* 5704 can be configured in single-port mode; set peer to
13199 * tp->pdev in that case.
13200 */
13201 if (!peer) {
13202 peer = tp->pdev;
13203 return peer;
13204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013205
13206 /*
13207 * We don't need to keep the refcount elevated; there's no way
13208 * to remove one half of this device without removing the other
13209 */
13210 pci_dev_put(peer);
13211
13212 return peer;
13213}
13214
David S. Miller15f98502005-05-18 22:49:26 -070013215static void __devinit tg3_init_coal(struct tg3 *tp)
13216{
13217 struct ethtool_coalesce *ec = &tp->coal;
13218
13219 memset(ec, 0, sizeof(*ec));
13220 ec->cmd = ETHTOOL_GCOALESCE;
13221 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13222 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13223 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13224 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13225 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13226 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13227 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13228 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13229 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13230
13231 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13232 HOSTCC_MODE_CLRTICK_TXBD)) {
13233 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13234 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13235 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13236 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13237 }
Michael Chand244c892005-07-05 14:42:33 -070013238
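	/* 5705 and newer chips apparently lack the per-interrupt coalescing
	 * and statistics-block tick controls, so clear them here.
	 */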
13239 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13240 ec->rx_coalesce_usecs_irq = 0;
13241 ec->tx_coalesce_usecs_irq = 0;
13242 ec->stats_block_coalesce_usecs = 0;
13243 }
David S. Miller15f98502005-05-18 22:49:26 -070013244}
13245
Linus Torvalds1da177e2005-04-16 15:20:36 -070013246static int __devinit tg3_init_one(struct pci_dev *pdev,
13247 const struct pci_device_id *ent)
13248{
13249 static int tg3_version_printed = 0;
Sergei Shtylyov2de58e32008-04-12 18:30:58 -070013250 resource_size_t tg3reg_base;
13251 unsigned long tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013252 struct net_device *dev;
13253 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080013254 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070013255 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080013256 u64 dma_mask, persist_dma_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013257
13258 if (tg3_version_printed++ == 0)
13259 printk(KERN_INFO "%s", version);
13260
13261 err = pci_enable_device(pdev);
13262 if (err) {
13263 printk(KERN_ERR PFX "Cannot enable PCI device, "
13264 "aborting.\n");
13265 return err;
13266 }
13267
13268 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13269 printk(KERN_ERR PFX "Cannot find proper PCI device "
13270 "base address, aborting.\n");
13271 err = -ENODEV;
13272 goto err_out_disable_pdev;
13273 }
13274
13275 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13276 if (err) {
13277 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13278 "aborting.\n");
13279 goto err_out_disable_pdev;
13280 }
13281
13282 pci_set_master(pdev);
13283
13284 /* Find power-management capability. */
13285 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13286 if (pm_cap == 0) {
13287 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13288 "aborting.\n");
13289 err = -EIO;
13290 goto err_out_free_res;
13291 }
13292
Linus Torvalds1da177e2005-04-16 15:20:36 -070013293 tg3reg_base = pci_resource_start(pdev, 0);
13294 tg3reg_len = pci_resource_len(pdev, 0);
13295
13296 dev = alloc_etherdev(sizeof(*tp));
13297 if (!dev) {
13298 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13299 err = -ENOMEM;
13300 goto err_out_free_res;
13301 }
13302
Linus Torvalds1da177e2005-04-16 15:20:36 -070013303 SET_NETDEV_DEV(dev, &pdev->dev);
13304
Linus Torvalds1da177e2005-04-16 15:20:36 -070013305#if TG3_VLAN_TAG_USED
13306 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13307 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013308#endif
13309
13310 tp = netdev_priv(dev);
13311 tp->pdev = pdev;
13312 tp->dev = dev;
13313 tp->pm_cap = pm_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013314 tp->rx_mode = TG3_DEF_RX_MODE;
13315 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070013316
Linus Torvalds1da177e2005-04-16 15:20:36 -070013317 if (tg3_debug > 0)
13318 tp->msg_enable = tg3_debug;
13319 else
13320 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13321
 13322	/* The word/byte swap settings here control register access byte
13323 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13324 * setting below.
13325 */
13326 tp->misc_host_ctrl =
13327 MISC_HOST_CTRL_MASK_PCI_INT |
13328 MISC_HOST_CTRL_WORD_SWAP |
13329 MISC_HOST_CTRL_INDIR_ACCESS |
13330 MISC_HOST_CTRL_PCISTATE_RW;
13331
13332 /* The NONFRM (non-frame) byte/word swap controls take effect
13333 * on descriptor entries, anything which isn't packet data.
13334 *
13335 * The StrongARM chips on the board (one for tx, one for rx)
13336 * are running in big-endian mode.
13337 */
13338 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13339 GRC_MODE_WSWAP_NONFRM_DATA);
13340#ifdef __BIG_ENDIAN
13341 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13342#endif
13343 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013344 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000013345 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013346
13347 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010013348 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013349 printk(KERN_ERR PFX "Cannot map device registers, "
13350 "aborting.\n");
13351 err = -ENOMEM;
13352 goto err_out_free_dev;
13353 }
13354
13355 tg3_init_link_config(tp);
13356
Linus Torvalds1da177e2005-04-16 15:20:36 -070013357 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13358 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13359 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13360
13361 dev->open = tg3_open;
13362 dev->stop = tg3_close;
13363 dev->get_stats = tg3_get_stats;
13364 dev->set_multicast_list = tg3_set_rx_mode;
13365 dev->set_mac_address = tg3_set_mac_addr;
13366 dev->do_ioctl = tg3_ioctl;
13367 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070013368 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013369 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013370 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13371 dev->change_mtu = tg3_change_mtu;
13372 dev->irq = pdev->irq;
13373#ifdef CONFIG_NET_POLL_CONTROLLER
13374 dev->poll_controller = tg3_poll_controller;
13375#endif
13376
13377 err = tg3_get_invariants(tp);
13378 if (err) {
13379 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13380 "aborting.\n");
13381 goto err_out_iounmap;
13382 }
13383
Michael Chan4a29cc22006-03-19 13:21:12 -080013384 /* The EPB bridge inside 5714, 5715, and 5780 and any
13385 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080013386 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13387 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13388 * do DMA address check in tg3_start_xmit().
13389 */
Michael Chan4a29cc22006-03-19 13:21:12 -080013390 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13391 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13392 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080013393 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13394#ifdef CONFIG_HIGHMEM
13395 dma_mask = DMA_64BIT_MASK;
13396#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080013397 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080013398 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13399
13400 /* Configure DMA attributes. */
13401 if (dma_mask > DMA_32BIT_MASK) {
13402 err = pci_set_dma_mask(pdev, dma_mask);
13403 if (!err) {
13404 dev->features |= NETIF_F_HIGHDMA;
13405 err = pci_set_consistent_dma_mask(pdev,
13406 persist_dma_mask);
13407 if (err < 0) {
13408 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13409 "DMA for consistent allocations\n");
13410 goto err_out_iounmap;
13411 }
13412 }
13413 }
13414 if (err || dma_mask == DMA_32BIT_MASK) {
13415 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13416 if (err) {
13417 printk(KERN_ERR PFX "No usable DMA configuration, "
13418 "aborting.\n");
13419 goto err_out_iounmap;
13420 }
13421 }
13422
Michael Chanfdfec1722005-07-25 12:31:48 -070013423 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013424
Linus Torvalds1da177e2005-04-16 15:20:36 -070013425 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13426 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13427 }
13428 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13430 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080013431 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070013432 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13433 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13434 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080013435 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013436 }
13437
Michael Chan4e3a7aa2006-03-20 17:47:44 -080013438 /* TSO is on by default on chips that support hardware TSO.
13439 * Firmware TSO on older chips gives lower performance, so it
13440 * is off by default, but can be enabled using ethtool.
13441 */
Michael Chanb0026622006-07-03 19:42:14 -070013442 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070013443 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070013444 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13445 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070013446 dev->features |= NETIF_F_TSO6;
Matt Carlson57e69832008-05-25 23:48:31 -070013447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13448 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13449 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlson9936bcf2007-10-10 18:03:07 -070013451 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070013452 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013453
Linus Torvalds1da177e2005-04-16 15:20:36 -070013454
13455 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13456 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13457 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13458 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13459 tp->rx_pending = 63;
13460 }
13461
Linus Torvalds1da177e2005-04-16 15:20:36 -070013462 err = tg3_get_device_address(tp);
13463 if (err) {
13464 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13465 "aborting.\n");
13466 goto err_out_iounmap;
13467 }
13468
Matt Carlson0d3031d2007-10-10 18:02:43 -070013469 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13470 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13471 printk(KERN_ERR PFX "Cannot find proper PCI device "
13472 "base address for APE, aborting.\n");
13473 err = -ENODEV;
13474 goto err_out_iounmap;
13475 }
13476
13477 tg3reg_base = pci_resource_start(pdev, 2);
13478 tg3reg_len = pci_resource_len(pdev, 2);
13479
13480 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
Al Viro79ea13c2008-01-24 02:06:46 -080013481 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070013482 printk(KERN_ERR PFX "Cannot map APE registers, "
13483 "aborting.\n");
13484 err = -ENOMEM;
13485 goto err_out_iounmap;
13486 }
13487
13488 tg3_ape_lock_init(tp);
13489 }
13490
Matt Carlsonc88864d2007-11-12 21:07:01 -080013491 /*
 13492	 * Reset the chip in case an UNDI or EFI driver did not shut it
 13493	 * down cleanly.  The DMA self test will enable WDMAC and we'll
 13494	 * see (spurious) pending DMA on the PCI bus at that point.
13495 */
13496 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13497 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13498 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13499 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13500 }
13501
13502 err = tg3_test_dma(tp);
13503 if (err) {
13504 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13505 goto err_out_apeunmap;
13506 }
13507
13508 /* Tigon3 can do ipv4 only... and some chips have buggy
13509 * checksumming.
13510 */
13511 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13512 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson57e69832008-05-25 23:48:31 -070013516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
Matt Carlsonc88864d2007-11-12 21:07:01 -080013518 dev->features |= NETIF_F_IPV6_CSUM;
13519
13520 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13521 } else
13522 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13523
13524 /* flow control autonegotiation is default behavior */
13525 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080013526 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080013527
13528 tg3_init_coal(tp);
13529
Michael Chanc49a1562006-12-17 17:07:29 -080013530 pci_set_drvdata(pdev, dev);
13531
Linus Torvalds1da177e2005-04-16 15:20:36 -070013532 err = register_netdev(dev);
13533 if (err) {
13534 printk(KERN_ERR PFX "Cannot register net device, "
13535 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070013536 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013537 }
13538
Joe Perchesd6645372007-12-20 04:06:59 -080013539 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
Johannes Berge1749612008-10-27 15:59:26 -070013540 "(%s) %s Ethernet %pM\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013541 dev->name,
13542 tp->board_part_number,
13543 tp->pci_chip_rev_id,
13544 tg3_phy_string(tp),
Michael Chanf9804dd2005-09-27 12:13:10 -070013545 tg3_bus_string(tp, str),
Michael Chancbb45d22006-12-07 00:24:09 -080013546 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13547 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
Joe Perchesd6645372007-12-20 04:06:59 -080013548 "10/100/1000Base-T")),
Johannes Berge1749612008-10-27 15:59:26 -070013549 dev->dev_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013550
13551 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
Michael Chan1c46ae052007-03-24 20:54:37 -070013552 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013553 dev->name,
13554 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13555 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13556 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13557 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013558 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13559 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013560 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13561 dev->name, tp->dma_rwctrl,
13562 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13563 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013564
13565 return 0;
13566
Matt Carlson0d3031d2007-10-10 18:02:43 -070013567err_out_apeunmap:
13568 if (tp->aperegs) {
13569 iounmap(tp->aperegs);
13570 tp->aperegs = NULL;
13571 }
13572
Linus Torvalds1da177e2005-04-16 15:20:36 -070013573err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013574 if (tp->regs) {
13575 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013576 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013577 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013578
13579err_out_free_dev:
13580 free_netdev(dev);
13581
13582err_out_free_res:
13583 pci_release_regions(pdev);
13584
13585err_out_disable_pdev:
13586 pci_disable_device(pdev);
13587 pci_set_drvdata(pdev, NULL);
13588 return err;
13589}
13590
13591static void __devexit tg3_remove_one(struct pci_dev *pdev)
13592{
13593 struct net_device *dev = pci_get_drvdata(pdev);
13594
13595 if (dev) {
13596 struct tg3 *tp = netdev_priv(dev);
13597
Michael Chan7faa0062006-02-02 17:29:28 -080013598 flush_scheduled_work();
Matt Carlson158d7ab2008-05-29 01:37:54 -070013599
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013600 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13601 tg3_phy_fini(tp);
Matt Carlson158d7ab2008-05-29 01:37:54 -070013602 tg3_mdio_fini(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013603 }
Matt Carlson158d7ab2008-05-29 01:37:54 -070013604
Linus Torvalds1da177e2005-04-16 15:20:36 -070013605 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013606 if (tp->aperegs) {
13607 iounmap(tp->aperegs);
13608 tp->aperegs = NULL;
13609 }
Michael Chan68929142005-08-09 20:17:14 -070013610 if (tp->regs) {
13611 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013612 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013613 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013614 free_netdev(dev);
13615 pci_release_regions(pdev);
13616 pci_disable_device(pdev);
13617 pci_set_drvdata(pdev, NULL);
13618 }
13619}
13620
13621static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13622{
13623 struct net_device *dev = pci_get_drvdata(pdev);
13624 struct tg3 *tp = netdev_priv(dev);
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013625 pci_power_t target_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013626 int err;
13627
Michael Chan3e0c95f2007-08-03 20:56:54 -070013628 /* PCI register 4 needs to be saved whether netif_running() or not.
13629 * MSI address and data need to be saved if using MSI and
13630 * netif_running().
13631 */
13632 pci_save_state(pdev);
13633
Linus Torvalds1da177e2005-04-16 15:20:36 -070013634 if (!netif_running(dev))
13635 return 0;
13636
Michael Chan7faa0062006-02-02 17:29:28 -080013637 flush_scheduled_work();
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013638 tg3_phy_stop(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013639 tg3_netif_stop(tp);
13640
13641 del_timer_sync(&tp->timer);
13642
David S. Millerf47c11e2005-06-24 20:18:35 -070013643 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013644 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013645 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013646
13647 netif_device_detach(dev);
13648
David S. Millerf47c11e2005-06-24 20:18:35 -070013649 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013650 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013651 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013652 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013653
Rafael J. Wysocki12dac072008-07-30 16:37:33 -070013654 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13655
13656 err = tg3_set_power_state(tp, target_state);
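	/* If entering the low-power state fails, bring the hardware and the
	 * network interface back up so the device is left usable rather
	 * than half suspended.
	 */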
Linus Torvalds1da177e2005-04-16 15:20:36 -070013657 if (err) {
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013658 int err2;
13659
David S. Millerf47c11e2005-06-24 20:18:35 -070013660 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013661
Michael Chan6a9eba12005-12-13 21:08:58 -080013662 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013663 err2 = tg3_restart_hw(tp, 1);
13664 if (err2)
Michael Chanb9ec6c12006-07-25 16:37:27 -070013665 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013666
13667 tp->timer.expires = jiffies + tp->timer_offset;
13668 add_timer(&tp->timer);
13669
13670 netif_device_attach(dev);
13671 tg3_netif_start(tp);
13672
Michael Chanb9ec6c12006-07-25 16:37:27 -070013673out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013674 tg3_full_unlock(tp);
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013675
13676 if (!err2)
13677 tg3_phy_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013678 }
13679
13680 return err;
13681}
13682
13683static int tg3_resume(struct pci_dev *pdev)
13684{
13685 struct net_device *dev = pci_get_drvdata(pdev);
13686 struct tg3 *tp = netdev_priv(dev);
13687 int err;
13688
Michael Chan3e0c95f2007-08-03 20:56:54 -070013689 pci_restore_state(tp->pdev);
13690
Linus Torvalds1da177e2005-04-16 15:20:36 -070013691 if (!netif_running(dev))
13692 return 0;
13693
Michael Chanbc1c7562006-03-20 17:48:03 -080013694 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013695 if (err)
13696 return err;
13697
13698 netif_device_attach(dev);
13699
David S. Millerf47c11e2005-06-24 20:18:35 -070013700 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013701
Michael Chan6a9eba12005-12-13 21:08:58 -080013702 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013703 err = tg3_restart_hw(tp, 1);
13704 if (err)
13705 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013706
13707 tp->timer.expires = jiffies + tp->timer_offset;
13708 add_timer(&tp->timer);
13709
Linus Torvalds1da177e2005-04-16 15:20:36 -070013710 tg3_netif_start(tp);
13711
Michael Chanb9ec6c12006-07-25 16:37:27 -070013712out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013713 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013714
Matt Carlsonb02fd9e2008-05-25 23:47:41 -070013715 if (!err)
13716 tg3_phy_start(tp);
13717
Michael Chanb9ec6c12006-07-25 16:37:27 -070013718 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013719}
13720
13721static struct pci_driver tg3_driver = {
13722 .name = DRV_MODULE_NAME,
13723 .id_table = tg3_pci_tbl,
13724 .probe = tg3_init_one,
13725 .remove = __devexit_p(tg3_remove_one),
13726 .suspend = tg3_suspend,
13727 .resume = tg3_resume
13728};
13729
13730static int __init tg3_init(void)
13731{
Jeff Garzik29917622006-08-19 17:48:59 -040013732 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013733}
13734
13735static void __exit tg3_cleanup(void)
13736{
13737 pci_unregister_driver(&tg3_driver);
13738}
13739
13740module_init(tg3_init);
13741module_exit(tg3_cleanup);