/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.86"
#define DRV_MODULE_RELDATE	"November 9, 2007"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
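/* NEXT_TX() relies on TG3_TX_RING_SIZE being a power of two, so the
 * ring wrap-around reduces to the mask below, as described in the
 * comment above.
 */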
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

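/* Write a TX mailbox register, repeating the write on chips with the
 * TXD mailbox hardware bug and reading it back when mailbox write
 * reordering must be flushed.
 */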
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

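/* Request APE lock 'locknum' on behalf of the driver and poll for up to
 * about a millisecond for the grant; returns -EBUSY (and revokes the
 * request) if the APE firmware does not grant it in time.
 */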
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

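/* Read PHY register 'reg' through the MAC's MI communication interface:
 * auto-polling is paused if active, a read frame is written to MAC_MI_COM,
 * and the busy bit is polled (up to PHY_BUSY_LOOPS) before the data is
 * extracted.  Returns 0 on success or -EBUSY on timeout.
 */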
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

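/* Poll PHY register 0x16 until the DSP macro-busy bit (0x1000) clears,
 * giving up after 100 reads.
 */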
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

static void tg3_link_report(struct tg3 *);

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

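/* Configure the GPIO-controlled auxiliary (Vaux) power switching, taking
 * the WOL/ASF state of both this device and its peer (on dual-port
 * 5704/5714 boards) into account.
 */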
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
			      WOL_DRV_STATE_SHUTDOWN |
			      WOL_DRV_WOL |
			      WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
1591 if (!netif_carrier_ok(tp->dev)) {
Michael Chan9f88f292006-12-07 00:22:54 -08001592 if (netif_msg_link(tp))
1593 printk(KERN_INFO PFX "%s: Link is down.\n",
1594 tp->dev->name);
1595 } else if (netif_msg_link(tp)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1597 tp->dev->name,
1598 (tp->link_config.active_speed == SPEED_1000 ?
1599 1000 :
1600 (tp->link_config.active_speed == SPEED_100 ?
1601 100 : 10)),
1602 (tp->link_config.active_duplex == DUPLEX_FULL ?
1603 "full" : "half"));
1604
Matt Carlson8d018622007-12-20 20:05:44 -08001605 printk(KERN_INFO PFX
1606 "%s: Flow control is %s for TX and %s for RX.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 tp->dev->name,
Matt Carlson8d018622007-12-20 20:05:44 -08001608 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1609 "on" : "off",
1610 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1611 "on" : "off");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 }
1613}
1614
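/* Resolve the TX/RX pause configuration.  With TG3_FLAG_PAUSE_AUTONEG set
 * the result follows the usual IEEE 802.3 pause resolution based on the
 * local and remote PAUSE/ASYM_PAUSE advertisement bits (1000BASE-X bits
 * are first mapped onto their 1000BASE-T equivalents for MII serdes
 * devices); otherwise the forced setting in tp->link_config.flowctrl is
 * used.  In short:
 *
 *	local adv		remote adv		resolved
 *	PAUSE			PAUSE			TX + RX
 *	PAUSE + ASYM		ASYM (no PAUSE)		RX only
 *	ASYM (no PAUSE)		PAUSE + ASYM		TX only
 *
 * The resolved flags are stored in active_flowctrl and applied to the
 * MAC RX/TX mode registers.
 */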
1615static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1616{
Matt Carlson8d018622007-12-20 20:05:44 -08001617 u8 new_tg3_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 u32 old_rx_mode = tp->rx_mode;
1619 u32 old_tx_mode = tp->tx_mode;
1620
1621 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
Michael Chan747e8f82005-07-25 12:33:22 -07001622
1623 /* Convert 1000BaseX flow control bits to 1000BaseT
1624 * bits before resolving flow control.
1625 */
1626 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1627 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1628 ADVERTISE_PAUSE_ASYM);
1629 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1630
1631 if (local_adv & ADVERTISE_1000XPAUSE)
1632 local_adv |= ADVERTISE_PAUSE_CAP;
1633 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1634 local_adv |= ADVERTISE_PAUSE_ASYM;
1635 if (remote_adv & LPA_1000XPAUSE)
1636 remote_adv |= LPA_PAUSE_CAP;
1637 if (remote_adv & LPA_1000XPAUSE_ASYM)
1638 remote_adv |= LPA_PAUSE_ASYM;
1639 }
1640
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 if (local_adv & ADVERTISE_PAUSE_CAP) {
1642 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1643 if (remote_adv & LPA_PAUSE_CAP)
Matt Carlson8d018622007-12-20 20:05:44 -08001644 new_tg3_flags = TG3_FLOW_CTRL_RX |
1645 TG3_FLOW_CTRL_TX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 else if (remote_adv & LPA_PAUSE_ASYM)
Matt Carlson8d018622007-12-20 20:05:44 -08001647 new_tg3_flags = TG3_FLOW_CTRL_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 } else {
1649 if (remote_adv & LPA_PAUSE_CAP)
Matt Carlson8d018622007-12-20 20:05:44 -08001650 new_tg3_flags = TG3_FLOW_CTRL_RX |
1651 TG3_FLOW_CTRL_TX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 }
1653 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1654 if ((remote_adv & LPA_PAUSE_CAP) &&
Matt Carlson8d018622007-12-20 20:05:44 -08001655 (remote_adv & LPA_PAUSE_ASYM))
1656 new_tg3_flags = TG3_FLOW_CTRL_TX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08001659 new_tg3_flags = tp->link_config.flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 }
1661
Matt Carlson8d018622007-12-20 20:05:44 -08001662 tp->link_config.active_flowctrl = new_tg3_flags;
1663
1664 if (new_tg3_flags & TG3_FLOW_CTRL_RX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1666 else
1667 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1668
1669 if (old_rx_mode != tp->rx_mode) {
1670 tw32_f(MAC_RX_MODE, tp->rx_mode);
1671 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001672
Matt Carlson8d018622007-12-20 20:05:44 -08001673 if (new_tg3_flags & TG3_FLOW_CTRL_TX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1675 else
1676 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1677
1678 if (old_tx_mode != tp->tx_mode) {
1679 tw32_f(MAC_TX_MODE, tp->tx_mode);
1680 }
1681}
1682
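/* Decode the speed/duplex field of the PHY AUX status register into
 * SPEED_xxx/DUPLEX_xxx values.  Unrecognized encodings yield
 * SPEED_INVALID/DUPLEX_INVALID, except on the 5906 where 10/100 and
 * duplex are reported through separate status bits.
 */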
1683static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1684{
1685 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1686 case MII_TG3_AUX_STAT_10HALF:
1687 *speed = SPEED_10;
1688 *duplex = DUPLEX_HALF;
1689 break;
1690
1691 case MII_TG3_AUX_STAT_10FULL:
1692 *speed = SPEED_10;
1693 *duplex = DUPLEX_FULL;
1694 break;
1695
1696 case MII_TG3_AUX_STAT_100HALF:
1697 *speed = SPEED_100;
1698 *duplex = DUPLEX_HALF;
1699 break;
1700
1701 case MII_TG3_AUX_STAT_100FULL:
1702 *speed = SPEED_100;
1703 *duplex = DUPLEX_FULL;
1704 break;
1705
1706 case MII_TG3_AUX_STAT_1000HALF:
1707 *speed = SPEED_1000;
1708 *duplex = DUPLEX_HALF;
1709 break;
1710
1711 case MII_TG3_AUX_STAT_1000FULL:
1712 *speed = SPEED_1000;
1713 *duplex = DUPLEX_FULL;
1714 break;
1715
1716 default:
Michael Chan715116a2006-09-27 16:09:25 -07001717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1718 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1719 SPEED_10;
1720 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1721 DUPLEX_HALF;
1722 break;
1723 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 *speed = SPEED_INVALID;
1725 *duplex = DUPLEX_INVALID;
1726 break;
1727	}
1728}
1729
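/* Program the copper PHY advertisement registers and restart
 * autonegotiation, or force a specific speed/duplex when autoneg is
 * disabled.  In low power mode only the 10Mb modes (plus 100Mb when
 * WOL at 100Mb is required) are advertised.
 */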
1730static void tg3_phy_copper_begin(struct tg3 *tp)
1731{
1732 u32 new_adv;
1733 int i;
1734
1735 if (tp->link_config.phy_is_low_power) {
1736 /* Entering low power mode. Disable gigabit and
1737 * 100baseT advertisements.
1738 */
1739 tg3_writephy(tp, MII_TG3_CTRL, 0);
1740
1741 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1742 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1743 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1744 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1745
1746 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1747 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1749 tp->link_config.advertising &=
1750 ~(ADVERTISED_1000baseT_Half |
1751 ADVERTISED_1000baseT_Full);
1752
1753 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1754 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1755 new_adv |= ADVERTISE_10HALF;
1756 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1757 new_adv |= ADVERTISE_10FULL;
1758 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1759 new_adv |= ADVERTISE_100HALF;
1760 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1761 new_adv |= ADVERTISE_100FULL;
1762 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1763
1764 if (tp->link_config.advertising &
1765 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1766 new_adv = 0;
1767 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1768 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1769 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1770 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1771 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1772 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1773 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1774 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1775 MII_TG3_CTRL_ENABLE_AS_MASTER);
1776 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1777 } else {
1778 tg3_writephy(tp, MII_TG3_CTRL, 0);
1779 }
1780 } else {
1781 /* Asking for a specific link mode. */
1782 if (tp->link_config.speed == SPEED_1000) {
1783 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1784 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1785
1786 if (tp->link_config.duplex == DUPLEX_FULL)
1787 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1788 else
1789 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1790 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1791 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1792 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1793 MII_TG3_CTRL_ENABLE_AS_MASTER);
1794 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1795 } else {
1796 tg3_writephy(tp, MII_TG3_CTRL, 0);
1797
1798 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1799 if (tp->link_config.speed == SPEED_100) {
1800 if (tp->link_config.duplex == DUPLEX_FULL)
1801 new_adv |= ADVERTISE_100FULL;
1802 else
1803 new_adv |= ADVERTISE_100HALF;
1804 } else {
1805 if (tp->link_config.duplex == DUPLEX_FULL)
1806 new_adv |= ADVERTISE_10FULL;
1807 else
1808 new_adv |= ADVERTISE_10HALF;
1809 }
1810 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1811 }
1812 }
1813
1814 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1815 tp->link_config.speed != SPEED_INVALID) {
1816 u32 bmcr, orig_bmcr;
1817
1818 tp->link_config.active_speed = tp->link_config.speed;
1819 tp->link_config.active_duplex = tp->link_config.duplex;
1820
1821 bmcr = 0;
1822 switch (tp->link_config.speed) {
1823 default:
1824 case SPEED_10:
1825 break;
1826
1827 case SPEED_100:
1828 bmcr |= BMCR_SPEED100;
1829 break;
1830
1831 case SPEED_1000:
1832 bmcr |= TG3_BMCR_SPEED1000;
1833 break;
1834		}
1835
1836 if (tp->link_config.duplex == DUPLEX_FULL)
1837 bmcr |= BMCR_FULLDPLX;
1838
1839 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1840 (bmcr != orig_bmcr)) {
1841 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1842 for (i = 0; i < 1500; i++) {
1843 u32 tmp;
1844
1845 udelay(10);
1846 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1847 tg3_readphy(tp, MII_BMSR, &tmp))
1848 continue;
1849 if (!(tmp & BMSR_LSTATUS)) {
1850 udelay(40);
1851 break;
1852 }
1853 }
1854 tg3_writephy(tp, MII_BMCR, bmcr);
1855 udelay(40);
1856 }
1857 } else {
1858 tg3_writephy(tp, MII_BMCR,
1859 BMCR_ANENABLE | BMCR_ANRESTART);
1860 }
1861}
1862
1863static int tg3_init_5401phy_dsp(struct tg3 *tp)
1864{
1865 int err;
1866
1867 /* Turn off tap power management. */
1868 /* Set Extended packet length bit */
1869 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1870
1871 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1872 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1873
1874 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1875 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1876
1877 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1878 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1879
1880 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1881 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1882
1883 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1884 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1885
1886 udelay(40);
1887
1888 return err;
1889}
1890
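/* Return 1 if the PHY advertisement registers already contain every mode
 * requested in @mask (MII_ADVERTISE and, unless the device is 10/100
 * only, the 1000BASE-T control register), 0 otherwise or on a register
 * read failure.
 */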
Michael Chan3600d912006-12-07 00:21:48 -08001891static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892{
Michael Chan3600d912006-12-07 00:21:48 -08001893 u32 adv_reg, all_mask = 0;
1894
1895 if (mask & ADVERTISED_10baseT_Half)
1896 all_mask |= ADVERTISE_10HALF;
1897 if (mask & ADVERTISED_10baseT_Full)
1898 all_mask |= ADVERTISE_10FULL;
1899 if (mask & ADVERTISED_100baseT_Half)
1900 all_mask |= ADVERTISE_100HALF;
1901 if (mask & ADVERTISED_100baseT_Full)
1902 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903
1904 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1905 return 0;
1906
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 if ((adv_reg & all_mask) != all_mask)
1908 return 0;
1909 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1910 u32 tg3_ctrl;
1911
Michael Chan3600d912006-12-07 00:21:48 -08001912 all_mask = 0;
1913 if (mask & ADVERTISED_1000baseT_Half)
1914 all_mask |= ADVERTISE_1000HALF;
1915 if (mask & ADVERTISED_1000baseT_Full)
1916 all_mask |= ADVERTISE_1000FULL;
1917
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1919 return 0;
1920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 if ((tg3_ctrl & all_mask) != all_mask)
1922 return 0;
1923 }
1924 return 1;
1925}
1926
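/* Bring up the link on a copper PHY: clear pending MAC and PHY events,
 * apply chip-specific PHY workarounds, poll BMSR/AUX_STAT for the
 * negotiated speed and duplex, resolve flow control, and finally
 * reprogram MAC_MODE and update the carrier state.
 */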
1927static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1928{
1929 int current_link_up;
1930 u32 bmsr, dummy;
1931 u16 current_speed;
1932 u8 current_duplex;
1933 int i, err;
1934
1935 tw32(MAC_EVENT, 0);
1936
1937 tw32_f(MAC_STATUS,
1938 (MAC_STATUS_SYNC_CHANGED |
1939 MAC_STATUS_CFG_CHANGED |
1940 MAC_STATUS_MI_COMPLETION |
1941 MAC_STATUS_LNKSTATE_CHANGED));
1942 udelay(40);
1943
1944 tp->mi_mode = MAC_MI_MODE_BASE;
1945 tw32_f(MAC_MI_MODE, tp->mi_mode);
1946 udelay(80);
1947
1948 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1949
1950 /* Some third-party PHYs need to be reset on link going
1951 * down.
1952 */
1953 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1956 netif_carrier_ok(tp->dev)) {
1957 tg3_readphy(tp, MII_BMSR, &bmsr);
1958 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1959 !(bmsr & BMSR_LSTATUS))
1960 force_reset = 1;
1961 }
1962 if (force_reset)
1963 tg3_phy_reset(tp);
1964
1965 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1966 tg3_readphy(tp, MII_BMSR, &bmsr);
1967 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1968 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1969 bmsr = 0;
1970
1971 if (!(bmsr & BMSR_LSTATUS)) {
1972 err = tg3_init_5401phy_dsp(tp);
1973 if (err)
1974 return err;
1975
1976 tg3_readphy(tp, MII_BMSR, &bmsr);
1977 for (i = 0; i < 1000; i++) {
1978 udelay(10);
1979 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1980 (bmsr & BMSR_LSTATUS)) {
1981 udelay(40);
1982 break;
1983 }
1984 }
1985
1986 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1987 !(bmsr & BMSR_LSTATUS) &&
1988 tp->link_config.active_speed == SPEED_1000) {
1989 err = tg3_phy_reset(tp);
1990 if (!err)
1991 err = tg3_init_5401phy_dsp(tp);
1992 if (err)
1993 return err;
1994 }
1995 }
1996 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1997 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1998 /* 5701 {A0,B0} CRC bug workaround */
1999 tg3_writephy(tp, 0x15, 0x0a75);
2000 tg3_writephy(tp, 0x1c, 0x8c68);
2001 tg3_writephy(tp, 0x1c, 0x8d68);
2002 tg3_writephy(tp, 0x1c, 0x8c68);
2003 }
2004
2005 /* Clear pending interrupts... */
2006 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2007 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2008
2009 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2010 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002011 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2013
2014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2016 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2017 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2018 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2019 else
2020 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2021 }
2022
2023 current_link_up = 0;
2024 current_speed = SPEED_INVALID;
2025 current_duplex = DUPLEX_INVALID;
2026
2027 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2028 u32 val;
2029
2030 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2031 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2032 if (!(val & (1 << 10))) {
2033 val |= (1 << 10);
2034 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2035 goto relink;
2036 }
2037 }
2038
2039 bmsr = 0;
2040 for (i = 0; i < 100; i++) {
2041 tg3_readphy(tp, MII_BMSR, &bmsr);
2042 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2043 (bmsr & BMSR_LSTATUS))
2044 break;
2045 udelay(40);
2046 }
2047
2048 if (bmsr & BMSR_LSTATUS) {
2049 u32 aux_stat, bmcr;
2050
2051 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2052 for (i = 0; i < 2000; i++) {
2053 udelay(10);
2054 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2055 aux_stat)
2056 break;
2057 }
2058
2059 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2060 &current_speed,
2061 &current_duplex);
2062
2063 bmcr = 0;
2064 for (i = 0; i < 200; i++) {
2065 tg3_readphy(tp, MII_BMCR, &bmcr);
2066 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2067 continue;
2068 if (bmcr && bmcr != 0x7fff)
2069 break;
2070 udelay(10);
2071 }
2072
2073 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2074 if (bmcr & BMCR_ANENABLE) {
2075 current_link_up = 1;
2076
2077 /* Force autoneg restart if we are exiting
2078 * low power mode.
2079 */
Michael Chan3600d912006-12-07 00:21:48 -08002080 if (!tg3_copper_is_advertising_all(tp,
2081 tp->link_config.advertising))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 current_link_up = 0;
2083 } else {
2084 current_link_up = 0;
2085 }
2086 } else {
2087 if (!(bmcr & BMCR_ANENABLE) &&
2088 tp->link_config.speed == current_speed &&
2089 tp->link_config.duplex == current_duplex) {
2090 current_link_up = 1;
2091 } else {
2092 current_link_up = 0;
2093 }
2094 }
2095
2096 tp->link_config.active_speed = current_speed;
2097 tp->link_config.active_duplex = current_duplex;
2098 }
2099
2100 if (current_link_up == 1 &&
2101 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2102 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2103 u32 local_adv, remote_adv;
2104
2105 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2106 local_adv = 0;
2107 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2108
2109 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2110 remote_adv = 0;
2111
2112 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2113
2114 /* If we are not advertising full pause capability,
2115 * something is wrong. Bring the link down and reconfigure.
2116 */
2117 if (local_adv != ADVERTISE_PAUSE_CAP) {
2118 current_link_up = 0;
2119 } else {
2120 tg3_setup_flow_control(tp, local_adv, remote_adv);
2121 }
2122 }
2123relink:
Michael Chan6921d202005-12-13 21:15:53 -08002124 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 u32 tmp;
2126
2127 tg3_phy_copper_begin(tp);
2128
2129 tg3_readphy(tp, MII_BMSR, &tmp);
2130 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2131 (tmp & BMSR_LSTATUS))
2132 current_link_up = 1;
2133 }
2134
2135 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2136 if (current_link_up == 1) {
2137 if (tp->link_config.active_speed == SPEED_100 ||
2138 tp->link_config.active_speed == SPEED_10)
2139 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2140 else
2141 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2142 } else
2143 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2144
2145 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2146 if (tp->link_config.active_duplex == DUPLEX_HALF)
2147 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2148
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002150 if (current_link_up == 1 &&
2151 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002153 else
2154 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 }
2156
2157 /* ??? Without this setting Netgear GA302T PHY does not
2158 * ??? send/receive packets...
2159 */
2160 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2161 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2162 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2163 tw32_f(MAC_MI_MODE, tp->mi_mode);
2164 udelay(80);
2165 }
2166
2167 tw32_f(MAC_MODE, tp->mac_mode);
2168 udelay(40);
2169
2170 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2171 /* Polled via timer. */
2172 tw32_f(MAC_EVENT, 0);
2173 } else {
2174 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2175 }
2176 udelay(40);
2177
2178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2179 current_link_up == 1 &&
2180 tp->link_config.active_speed == SPEED_1000 &&
2181 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2182 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2183 udelay(120);
2184 tw32_f(MAC_STATUS,
2185 (MAC_STATUS_SYNC_CHANGED |
2186 MAC_STATUS_CFG_CHANGED));
2187 udelay(40);
2188 tg3_write_mem(tp,
2189 NIC_SRAM_FIRMWARE_MBOX,
2190 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2191 }
2192
2193 if (current_link_up != netif_carrier_ok(tp->dev)) {
2194 if (current_link_up)
2195 netif_carrier_on(tp->dev);
2196 else
2197 netif_carrier_off(tp->dev);
2198 tg3_link_report(tp);
2199 }
2200
2201 return 0;
2202}
2203
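/* Software state for the fiber autonegotiation state machine (modelled
 * on IEEE 802.3 clause 37), used when hardware autoneg is unavailable
 * or bypassed.  The MR_* flags mirror the management register bits the
 * standard defines for reporting link partner abilities.
 */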
2204struct tg3_fiber_aneginfo {
2205 int state;
2206#define ANEG_STATE_UNKNOWN 0
2207#define ANEG_STATE_AN_ENABLE 1
2208#define ANEG_STATE_RESTART_INIT 2
2209#define ANEG_STATE_RESTART 3
2210#define ANEG_STATE_DISABLE_LINK_OK 4
2211#define ANEG_STATE_ABILITY_DETECT_INIT 5
2212#define ANEG_STATE_ABILITY_DETECT 6
2213#define ANEG_STATE_ACK_DETECT_INIT 7
2214#define ANEG_STATE_ACK_DETECT 8
2215#define ANEG_STATE_COMPLETE_ACK_INIT 9
2216#define ANEG_STATE_COMPLETE_ACK 10
2217#define ANEG_STATE_IDLE_DETECT_INIT 11
2218#define ANEG_STATE_IDLE_DETECT 12
2219#define ANEG_STATE_LINK_OK 13
2220#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2221#define ANEG_STATE_NEXT_PAGE_WAIT 15
2222
2223 u32 flags;
2224#define MR_AN_ENABLE 0x00000001
2225#define MR_RESTART_AN 0x00000002
2226#define MR_AN_COMPLETE 0x00000004
2227#define MR_PAGE_RX 0x00000008
2228#define MR_NP_LOADED 0x00000010
2229#define MR_TOGGLE_TX 0x00000020
2230#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2231#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2232#define MR_LP_ADV_SYM_PAUSE 0x00000100
2233#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2234#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2235#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2236#define MR_LP_ADV_NEXT_PAGE 0x00001000
2237#define MR_TOGGLE_RX 0x00002000
2238#define MR_NP_RX 0x00004000
2239
2240#define MR_LINK_OK 0x80000000
2241
2242 unsigned long link_time, cur_time;
2243
2244 u32 ability_match_cfg;
2245 int ability_match_count;
2246
2247 char ability_match, idle_match, ack_match;
2248
2249 u32 txconfig, rxconfig;
2250#define ANEG_CFG_NP 0x00000080
2251#define ANEG_CFG_ACK 0x00000040
2252#define ANEG_CFG_RF2 0x00000020
2253#define ANEG_CFG_RF1 0x00000010
2254#define ANEG_CFG_PS2 0x00000001
2255#define ANEG_CFG_PS1 0x00008000
2256#define ANEG_CFG_HD 0x00004000
2257#define ANEG_CFG_FD 0x00002000
2258#define ANEG_CFG_INVAL 0x00001f06
2259
2260};
2261#define ANEG_OK 0
2262#define ANEG_DONE 1
2263#define ANEG_TIMER_ENAB 2
2264#define ANEG_FAILED -1
2265
2266#define ANEG_STATE_SETTLE_TIME 10000
2267
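/* Advance the fiber autoneg state machine by one step.  fiber_autoneg()
 * calls this in a tight polling loop; the return value is ANEG_OK to
 * keep polling, ANEG_TIMER_ENAB while a settle interval is running,
 * ANEG_DONE on completion, or ANEG_FAILED on error.
 */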
2268static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2269 struct tg3_fiber_aneginfo *ap)
2270{
2271 unsigned long delta;
2272 u32 rx_cfg_reg;
2273 int ret;
2274
2275 if (ap->state == ANEG_STATE_UNKNOWN) {
2276 ap->rxconfig = 0;
2277 ap->link_time = 0;
2278 ap->cur_time = 0;
2279 ap->ability_match_cfg = 0;
2280 ap->ability_match_count = 0;
2281 ap->ability_match = 0;
2282 ap->idle_match = 0;
2283 ap->ack_match = 0;
2284 }
2285 ap->cur_time++;
2286
2287 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2288 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2289
2290 if (rx_cfg_reg != ap->ability_match_cfg) {
2291 ap->ability_match_cfg = rx_cfg_reg;
2292 ap->ability_match = 0;
2293 ap->ability_match_count = 0;
2294 } else {
2295 if (++ap->ability_match_count > 1) {
2296 ap->ability_match = 1;
2297 ap->ability_match_cfg = rx_cfg_reg;
2298 }
2299 }
2300 if (rx_cfg_reg & ANEG_CFG_ACK)
2301 ap->ack_match = 1;
2302 else
2303 ap->ack_match = 0;
2304
2305 ap->idle_match = 0;
2306 } else {
2307 ap->idle_match = 1;
2308 ap->ability_match_cfg = 0;
2309 ap->ability_match_count = 0;
2310 ap->ability_match = 0;
2311 ap->ack_match = 0;
2312
2313 rx_cfg_reg = 0;
2314 }
2315
2316 ap->rxconfig = rx_cfg_reg;
2317 ret = ANEG_OK;
2318
2319 switch(ap->state) {
2320 case ANEG_STATE_UNKNOWN:
2321 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2322 ap->state = ANEG_STATE_AN_ENABLE;
2323
2324 /* fallthru */
2325 case ANEG_STATE_AN_ENABLE:
2326 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2327 if (ap->flags & MR_AN_ENABLE) {
2328 ap->link_time = 0;
2329 ap->cur_time = 0;
2330 ap->ability_match_cfg = 0;
2331 ap->ability_match_count = 0;
2332 ap->ability_match = 0;
2333 ap->idle_match = 0;
2334 ap->ack_match = 0;
2335
2336 ap->state = ANEG_STATE_RESTART_INIT;
2337 } else {
2338 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2339 }
2340 break;
2341
2342 case ANEG_STATE_RESTART_INIT:
2343 ap->link_time = ap->cur_time;
2344 ap->flags &= ~(MR_NP_LOADED);
2345 ap->txconfig = 0;
2346 tw32(MAC_TX_AUTO_NEG, 0);
2347 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2348 tw32_f(MAC_MODE, tp->mac_mode);
2349 udelay(40);
2350
2351 ret = ANEG_TIMER_ENAB;
2352 ap->state = ANEG_STATE_RESTART;
2353
2354 /* fallthru */
2355 case ANEG_STATE_RESTART:
2356 delta = ap->cur_time - ap->link_time;
2357 if (delta > ANEG_STATE_SETTLE_TIME) {
2358 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2359 } else {
2360 ret = ANEG_TIMER_ENAB;
2361 }
2362 break;
2363
2364 case ANEG_STATE_DISABLE_LINK_OK:
2365 ret = ANEG_DONE;
2366 break;
2367
2368 case ANEG_STATE_ABILITY_DETECT_INIT:
2369 ap->flags &= ~(MR_TOGGLE_TX);
2370 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2371 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2372 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2373 tw32_f(MAC_MODE, tp->mac_mode);
2374 udelay(40);
2375
2376 ap->state = ANEG_STATE_ABILITY_DETECT;
2377 break;
2378
2379 case ANEG_STATE_ABILITY_DETECT:
2380 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2381 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2382 }
2383 break;
2384
2385 case ANEG_STATE_ACK_DETECT_INIT:
2386 ap->txconfig |= ANEG_CFG_ACK;
2387 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2388 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2389 tw32_f(MAC_MODE, tp->mac_mode);
2390 udelay(40);
2391
2392 ap->state = ANEG_STATE_ACK_DETECT;
2393
2394 /* fallthru */
2395 case ANEG_STATE_ACK_DETECT:
2396 if (ap->ack_match != 0) {
2397 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2398 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2399 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2400 } else {
2401 ap->state = ANEG_STATE_AN_ENABLE;
2402 }
2403 } else if (ap->ability_match != 0 &&
2404 ap->rxconfig == 0) {
2405 ap->state = ANEG_STATE_AN_ENABLE;
2406 }
2407 break;
2408
2409 case ANEG_STATE_COMPLETE_ACK_INIT:
2410 if (ap->rxconfig & ANEG_CFG_INVAL) {
2411 ret = ANEG_FAILED;
2412 break;
2413 }
2414 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2415 MR_LP_ADV_HALF_DUPLEX |
2416 MR_LP_ADV_SYM_PAUSE |
2417 MR_LP_ADV_ASYM_PAUSE |
2418 MR_LP_ADV_REMOTE_FAULT1 |
2419 MR_LP_ADV_REMOTE_FAULT2 |
2420 MR_LP_ADV_NEXT_PAGE |
2421 MR_TOGGLE_RX |
2422 MR_NP_RX);
2423 if (ap->rxconfig & ANEG_CFG_FD)
2424 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2425 if (ap->rxconfig & ANEG_CFG_HD)
2426 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2427 if (ap->rxconfig & ANEG_CFG_PS1)
2428 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2429 if (ap->rxconfig & ANEG_CFG_PS2)
2430 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2431 if (ap->rxconfig & ANEG_CFG_RF1)
2432 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2433 if (ap->rxconfig & ANEG_CFG_RF2)
2434 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2435 if (ap->rxconfig & ANEG_CFG_NP)
2436 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2437
2438 ap->link_time = ap->cur_time;
2439
2440 ap->flags ^= (MR_TOGGLE_TX);
2441 if (ap->rxconfig & 0x0008)
2442 ap->flags |= MR_TOGGLE_RX;
2443 if (ap->rxconfig & ANEG_CFG_NP)
2444 ap->flags |= MR_NP_RX;
2445 ap->flags |= MR_PAGE_RX;
2446
2447 ap->state = ANEG_STATE_COMPLETE_ACK;
2448 ret = ANEG_TIMER_ENAB;
2449 break;
2450
2451 case ANEG_STATE_COMPLETE_ACK:
2452 if (ap->ability_match != 0 &&
2453 ap->rxconfig == 0) {
2454 ap->state = ANEG_STATE_AN_ENABLE;
2455 break;
2456 }
2457 delta = ap->cur_time - ap->link_time;
2458 if (delta > ANEG_STATE_SETTLE_TIME) {
2459 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2460 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2461 } else {
2462 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2463 !(ap->flags & MR_NP_RX)) {
2464 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2465 } else {
2466 ret = ANEG_FAILED;
2467 }
2468 }
2469 }
2470 break;
2471
2472 case ANEG_STATE_IDLE_DETECT_INIT:
2473 ap->link_time = ap->cur_time;
2474 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2475 tw32_f(MAC_MODE, tp->mac_mode);
2476 udelay(40);
2477
2478 ap->state = ANEG_STATE_IDLE_DETECT;
2479 ret = ANEG_TIMER_ENAB;
2480 break;
2481
2482 case ANEG_STATE_IDLE_DETECT:
2483 if (ap->ability_match != 0 &&
2484 ap->rxconfig == 0) {
2485 ap->state = ANEG_STATE_AN_ENABLE;
2486 break;
2487 }
2488 delta = ap->cur_time - ap->link_time;
2489 if (delta > ANEG_STATE_SETTLE_TIME) {
2490 /* XXX another gem from the Broadcom driver :( */
2491 ap->state = ANEG_STATE_LINK_OK;
2492 }
2493 break;
2494
2495 case ANEG_STATE_LINK_OK:
2496 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2497 ret = ANEG_DONE;
2498 break;
2499
2500 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2501 /* ??? unimplemented */
2502 break;
2503
2504 case ANEG_STATE_NEXT_PAGE_WAIT:
2505 /* ??? unimplemented */
2506 break;
2507
2508 default:
2509 ret = ANEG_FAILED;
2510 break;
2511	}
2512
2513 return ret;
2514}
2515
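/* Run the software autoneg state machine, sending config words via
 * MAC_MODE_SEND_CONFIGS, for at most ~195000 polling iterations.
 * *flags always receives the resulting MR_* flags; the return value is
 * 1 only if the state machine finished successfully.
 */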
2516static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2517{
2518 int res = 0;
2519 struct tg3_fiber_aneginfo aninfo;
2520 int status = ANEG_FAILED;
2521 unsigned int tick;
2522 u32 tmp;
2523
2524 tw32_f(MAC_TX_AUTO_NEG, 0);
2525
2526 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2527 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2528 udelay(40);
2529
2530 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2531 udelay(40);
2532
2533 memset(&aninfo, 0, sizeof(aninfo));
2534 aninfo.flags |= MR_AN_ENABLE;
2535 aninfo.state = ANEG_STATE_UNKNOWN;
2536 aninfo.cur_time = 0;
2537 tick = 0;
2538 while (++tick < 195000) {
2539 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2540 if (status == ANEG_DONE || status == ANEG_FAILED)
2541 break;
2542
2543 udelay(1);
2544 }
2545
2546 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2547 tw32_f(MAC_MODE, tp->mac_mode);
2548 udelay(40);
2549
2550 *flags = aninfo.flags;
2551
2552 if (status == ANEG_DONE &&
2553 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2554 MR_LP_ADV_FULL_DUPLEX)))
2555 res = 1;
2556
2557 return res;
2558}
2559
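/* One-time initialization sequence for the external BCM8002 fiber PHY
 * (PLL lock range, software reset, auto-lock/comdet selection and POR
 * toggling), skipped when the device is already initialized and has no
 * link.
 */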
2560static void tg3_init_bcm8002(struct tg3 *tp)
2561{
2562 u32 mac_status = tr32(MAC_STATUS);
2563 int i;
2564
2565	/* Reset when initializing for the first time or when we have a link. */
2566 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2567 !(mac_status & MAC_STATUS_PCS_SYNCED))
2568 return;
2569
2570 /* Set PLL lock range. */
2571 tg3_writephy(tp, 0x16, 0x8007);
2572
2573 /* SW reset */
2574 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2575
2576 /* Wait for reset to complete. */
2577 /* XXX schedule_timeout() ... */
2578 for (i = 0; i < 500; i++)
2579 udelay(10);
2580
2581 /* Config mode; select PMA/Ch 1 regs. */
2582 tg3_writephy(tp, 0x10, 0x8411);
2583
2584 /* Enable auto-lock and comdet, select txclk for tx. */
2585 tg3_writephy(tp, 0x11, 0x0a10);
2586
2587 tg3_writephy(tp, 0x18, 0x00a0);
2588 tg3_writephy(tp, 0x16, 0x41ff);
2589
2590 /* Assert and deassert POR. */
2591 tg3_writephy(tp, 0x13, 0x0400);
2592 udelay(40);
2593 tg3_writephy(tp, 0x13, 0x0000);
2594
2595 tg3_writephy(tp, 0x11, 0x0a50);
2596 udelay(40);
2597 tg3_writephy(tp, 0x11, 0x0a10);
2598
2599 /* Wait for signal to stabilize */
2600 /* XXX schedule_timeout() ... */
2601 for (i = 0; i < 15000; i++)
2602 udelay(10);
2603
2604 /* Deselect the channel register so we can read the PHYID
2605 * later.
2606 */
2607 tg3_writephy(tp, 0x10, 0x8011);
2608}
2609
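/* Fiber link setup using the hardware SG-DIG autoneg block (used when
 * TG3_FLG2_HW_AUTONEG is set).  Handles the forced-speed case, kicks
 * off hardware autoneg, resolves flow control from SG_DIG_STATUS, and
 * falls back to parallel detection when no config words are received.
 * Returns nonzero when the link should be considered up.
 */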
2610static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2611{
2612 u32 sg_dig_ctrl, sg_dig_status;
2613 u32 serdes_cfg, expected_sg_dig_ctrl;
2614 int workaround, port_a;
2615 int current_link_up;
2616
2617 serdes_cfg = 0;
2618 expected_sg_dig_ctrl = 0;
2619 workaround = 0;
2620 port_a = 1;
2621 current_link_up = 0;
2622
2623 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2624 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2625 workaround = 1;
2626 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2627 port_a = 0;
2628
2629 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2630 /* preserve bits 20-23 for voltage regulator */
2631 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2632 }
2633
2634 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2635
2636 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2637 if (sg_dig_ctrl & (1 << 31)) {
2638 if (workaround) {
2639 u32 val = serdes_cfg;
2640
2641 if (port_a)
2642 val |= 0xc010000;
2643 else
2644 val |= 0x4010000;
2645 tw32_f(MAC_SERDES_CFG, val);
2646 }
2647 tw32_f(SG_DIG_CTRL, 0x01388400);
2648 }
2649 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2650 tg3_setup_flow_control(tp, 0, 0);
2651 current_link_up = 1;
2652 }
2653 goto out;
2654 }
2655
2656 /* Want auto-negotiation. */
2657 expected_sg_dig_ctrl = 0x81388400;
2658
2659 /* Pause capability */
2660 expected_sg_dig_ctrl |= (1 << 11);
2661
2662	/* Asymmetric pause */
2663 expected_sg_dig_ctrl |= (1 << 12);
2664
2665 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07002666 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2667 tp->serdes_counter &&
2668 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2669 MAC_STATUS_RCVD_CFG)) ==
2670 MAC_STATUS_PCS_SYNCED)) {
2671 tp->serdes_counter--;
2672 current_link_up = 1;
2673 goto out;
2674 }
2675restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 if (workaround)
2677 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2678 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2679 udelay(5);
2680 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2681
Michael Chan3d3ebe72006-09-27 15:59:15 -07002682 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2683 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2685 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07002686 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 mac_status = tr32(MAC_STATUS);
2688
2689 if ((sg_dig_status & (1 << 1)) &&
2690 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2691 u32 local_adv, remote_adv;
2692
2693 local_adv = ADVERTISE_PAUSE_CAP;
2694 remote_adv = 0;
2695 if (sg_dig_status & (1 << 19))
2696 remote_adv |= LPA_PAUSE_CAP;
2697 if (sg_dig_status & (1 << 20))
2698 remote_adv |= LPA_PAUSE_ASYM;
2699
2700 tg3_setup_flow_control(tp, local_adv, remote_adv);
2701 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07002702 tp->serdes_counter = 0;
2703 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 } else if (!(sg_dig_status & (1 << 1))) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07002705 if (tp->serdes_counter)
2706 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 else {
2708 if (workaround) {
2709 u32 val = serdes_cfg;
2710
2711 if (port_a)
2712 val |= 0xc010000;
2713 else
2714 val |= 0x4010000;
2715
2716 tw32_f(MAC_SERDES_CFG, val);
2717 }
2718
2719 tw32_f(SG_DIG_CTRL, 0x01388400);
2720 udelay(40);
2721
2722				/* Link parallel detection - link is up
2723				 * only if we have PCS_SYNC and not
2724				 * receiving config code words. */
2725 mac_status = tr32(MAC_STATUS);
2726 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2727 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2728 tg3_setup_flow_control(tp, 0, 0);
2729 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07002730 tp->tg3_flags2 |=
2731 TG3_FLG2_PARALLEL_DETECT;
2732 tp->serdes_counter =
2733 SERDES_PARALLEL_DET_TIMEOUT;
2734 } else
2735 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 }
2737 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07002738 } else {
2739 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2740 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 }
2742
2743out:
2744 return current_link_up;
2745}
2746
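/* Fiber link setup without the hardware autoneg block: run the software
 * autoneg state machine when autoneg is enabled, otherwise simply force
 * a 1000FD link, then wait for the MAC status to settle.  Returns
 * nonzero when the link is up.
 */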
2747static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2748{
2749 int current_link_up = 0;
2750
Michael Chan5cf64b8a2007-05-05 12:11:21 -07002751 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753
2754 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2755 u32 flags;
2756 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002757
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 if (fiber_autoneg(tp, &flags)) {
2759 u32 local_adv, remote_adv;
2760
2761 local_adv = ADVERTISE_PAUSE_CAP;
2762 remote_adv = 0;
2763 if (flags & MR_LP_ADV_SYM_PAUSE)
2764 remote_adv |= LPA_PAUSE_CAP;
2765 if (flags & MR_LP_ADV_ASYM_PAUSE)
2766 remote_adv |= LPA_PAUSE_ASYM;
2767
2768 tg3_setup_flow_control(tp, local_adv, remote_adv);
2769
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 current_link_up = 1;
2771 }
2772 for (i = 0; i < 30; i++) {
2773 udelay(20);
2774 tw32_f(MAC_STATUS,
2775 (MAC_STATUS_SYNC_CHANGED |
2776 MAC_STATUS_CFG_CHANGED));
2777 udelay(40);
2778 if ((tr32(MAC_STATUS) &
2779 (MAC_STATUS_SYNC_CHANGED |
2780 MAC_STATUS_CFG_CHANGED)) == 0)
2781 break;
2782 }
2783
2784 mac_status = tr32(MAC_STATUS);
2785 if (current_link_up == 0 &&
2786 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2787 !(mac_status & MAC_STATUS_RCVD_CFG))
2788 current_link_up = 1;
2789 } else {
2790 /* Forcing 1000FD link up. */
2791 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
2793 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2794 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002795
2796 tw32_f(MAC_MODE, tp->mac_mode);
2797 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 }
2799
2800out:
2801 return current_link_up;
2802}
2803
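/* Top-level link setup for TBI (fiber) devices: bail out early if the
 * link already looks good, otherwise put the MAC into TBI mode, run
 * hardware or hand-rolled autoneg, then update the LED overrides and
 * carrier state, reporting any change in speed, duplex or flow control.
 */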
2804static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2805{
2806 u32 orig_pause_cfg;
2807 u16 orig_active_speed;
2808 u8 orig_active_duplex;
2809 u32 mac_status;
2810 int current_link_up;
2811 int i;
2812
Matt Carlson8d018622007-12-20 20:05:44 -08002813 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 orig_active_speed = tp->link_config.active_speed;
2815 orig_active_duplex = tp->link_config.active_duplex;
2816
2817 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2818 netif_carrier_ok(tp->dev) &&
2819 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2820 mac_status = tr32(MAC_STATUS);
2821 mac_status &= (MAC_STATUS_PCS_SYNCED |
2822 MAC_STATUS_SIGNAL_DET |
2823 MAC_STATUS_CFG_CHANGED |
2824 MAC_STATUS_RCVD_CFG);
2825 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2826 MAC_STATUS_SIGNAL_DET)) {
2827 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2828 MAC_STATUS_CFG_CHANGED));
2829 return 0;
2830 }
2831 }
2832
2833 tw32_f(MAC_TX_AUTO_NEG, 0);
2834
2835 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2836 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2837 tw32_f(MAC_MODE, tp->mac_mode);
2838 udelay(40);
2839
2840 if (tp->phy_id == PHY_ID_BCM8002)
2841 tg3_init_bcm8002(tp);
2842
2843 /* Enable link change event even when serdes polling. */
2844 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2845 udelay(40);
2846
2847 current_link_up = 0;
2848 mac_status = tr32(MAC_STATUS);
2849
2850 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2851 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2852 else
2853 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2854
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 tp->hw_status->status =
2856 (SD_STATUS_UPDATED |
2857 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2858
2859 for (i = 0; i < 100; i++) {
2860 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2861 MAC_STATUS_CFG_CHANGED));
2862 udelay(5);
2863 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07002864 MAC_STATUS_CFG_CHANGED |
2865 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 break;
2867 }
2868
2869 mac_status = tr32(MAC_STATUS);
2870 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2871 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07002872 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2873 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 tw32_f(MAC_MODE, (tp->mac_mode |
2875 MAC_MODE_SEND_CONFIGS));
2876 udelay(1);
2877 tw32_f(MAC_MODE, tp->mac_mode);
2878 }
2879 }
2880
2881 if (current_link_up == 1) {
2882 tp->link_config.active_speed = SPEED_1000;
2883 tp->link_config.active_duplex = DUPLEX_FULL;
2884 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2885 LED_CTRL_LNKLED_OVERRIDE |
2886 LED_CTRL_1000MBPS_ON));
2887 } else {
2888 tp->link_config.active_speed = SPEED_INVALID;
2889 tp->link_config.active_duplex = DUPLEX_INVALID;
2890 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2891 LED_CTRL_LNKLED_OVERRIDE |
2892 LED_CTRL_TRAFFIC_OVERRIDE));
2893 }
2894
2895 if (current_link_up != netif_carrier_ok(tp->dev)) {
2896 if (current_link_up)
2897 netif_carrier_on(tp->dev);
2898 else
2899 netif_carrier_off(tp->dev);
2900 tg3_link_report(tp);
2901 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08002902 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903 if (orig_pause_cfg != now_pause_cfg ||
2904 orig_active_speed != tp->link_config.active_speed ||
2905 orig_active_duplex != tp->link_config.active_duplex)
2906 tg3_link_report(tp);
2907 }
2908
2909 return 0;
2910}
2911
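/* Link setup for serdes devices managed through an MII-style register
 * interface (TG3_FLG2_MII_SERDES): program the 1000BASE-X advertisement,
 * restart or force autoneg via BMCR, derive duplex and flow control from
 * the link partner ability words, and update the carrier state.
 */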
Michael Chan747e8f82005-07-25 12:33:22 -07002912static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2913{
2914 int current_link_up, err = 0;
2915 u32 bmsr, bmcr;
2916 u16 current_speed;
2917 u8 current_duplex;
2918
2919 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2920 tw32_f(MAC_MODE, tp->mac_mode);
2921 udelay(40);
2922
2923 tw32(MAC_EVENT, 0);
2924
2925 tw32_f(MAC_STATUS,
2926 (MAC_STATUS_SYNC_CHANGED |
2927 MAC_STATUS_CFG_CHANGED |
2928 MAC_STATUS_MI_COMPLETION |
2929 MAC_STATUS_LNKSTATE_CHANGED));
2930 udelay(40);
2931
2932 if (force_reset)
2933 tg3_phy_reset(tp);
2934
2935 current_link_up = 0;
2936 current_speed = SPEED_INVALID;
2937 current_duplex = DUPLEX_INVALID;
2938
2939 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2940 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08002941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2942 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2943 bmsr |= BMSR_LSTATUS;
2944 else
2945 bmsr &= ~BMSR_LSTATUS;
2946 }
Michael Chan747e8f82005-07-25 12:33:22 -07002947
2948 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2949
2950 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2951 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2952 /* do nothing, just check for link up at the end */
2953 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2954 u32 adv, new_adv;
2955
2956 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2957 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2958 ADVERTISE_1000XPAUSE |
2959 ADVERTISE_1000XPSE_ASYM |
2960 ADVERTISE_SLCT);
2961
2962 /* Always advertise symmetric PAUSE just like copper */
2963 new_adv |= ADVERTISE_1000XPAUSE;
2964
2965 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2966 new_adv |= ADVERTISE_1000XHALF;
2967 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2968 new_adv |= ADVERTISE_1000XFULL;
2969
2970 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2971 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2972 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2973 tg3_writephy(tp, MII_BMCR, bmcr);
2974
2975 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07002976 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07002977 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2978
2979 return err;
2980 }
2981 } else {
2982 u32 new_bmcr;
2983
2984 bmcr &= ~BMCR_SPEED1000;
2985 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2986
2987 if (tp->link_config.duplex == DUPLEX_FULL)
2988 new_bmcr |= BMCR_FULLDPLX;
2989
2990 if (new_bmcr != bmcr) {
2991 /* BMCR_SPEED1000 is a reserved bit that needs
2992 * to be set on write.
2993 */
2994 new_bmcr |= BMCR_SPEED1000;
2995
2996 /* Force a linkdown */
2997 if (netif_carrier_ok(tp->dev)) {
2998 u32 adv;
2999
3000 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3001 adv &= ~(ADVERTISE_1000XFULL |
3002 ADVERTISE_1000XHALF |
3003 ADVERTISE_SLCT);
3004 tg3_writephy(tp, MII_ADVERTISE, adv);
3005 tg3_writephy(tp, MII_BMCR, bmcr |
3006 BMCR_ANRESTART |
3007 BMCR_ANENABLE);
3008 udelay(10);
3009 netif_carrier_off(tp->dev);
3010 }
3011 tg3_writephy(tp, MII_BMCR, new_bmcr);
3012 bmcr = new_bmcr;
3013 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3014 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003015 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3016 ASIC_REV_5714) {
3017 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3018 bmsr |= BMSR_LSTATUS;
3019 else
3020 bmsr &= ~BMSR_LSTATUS;
3021 }
Michael Chan747e8f82005-07-25 12:33:22 -07003022 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3023 }
3024 }
3025
3026 if (bmsr & BMSR_LSTATUS) {
3027 current_speed = SPEED_1000;
3028 current_link_up = 1;
3029 if (bmcr & BMCR_FULLDPLX)
3030 current_duplex = DUPLEX_FULL;
3031 else
3032 current_duplex = DUPLEX_HALF;
3033
3034 if (bmcr & BMCR_ANENABLE) {
3035 u32 local_adv, remote_adv, common;
3036
3037 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3038 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3039 common = local_adv & remote_adv;
3040 if (common & (ADVERTISE_1000XHALF |
3041 ADVERTISE_1000XFULL)) {
3042 if (common & ADVERTISE_1000XFULL)
3043 current_duplex = DUPLEX_FULL;
3044 else
3045 current_duplex = DUPLEX_HALF;
3046
3047 tg3_setup_flow_control(tp, local_adv,
3048 remote_adv);
3049 }
3050 else
3051 current_link_up = 0;
3052 }
3053 }
3054
3055 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3056 if (tp->link_config.active_duplex == DUPLEX_HALF)
3057 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3058
3059 tw32_f(MAC_MODE, tp->mac_mode);
3060 udelay(40);
3061
3062 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3063
3064 tp->link_config.active_speed = current_speed;
3065 tp->link_config.active_duplex = current_duplex;
3066
3067 if (current_link_up != netif_carrier_ok(tp->dev)) {
3068 if (current_link_up)
3069 netif_carrier_on(tp->dev);
3070 else {
3071 netif_carrier_off(tp->dev);
3072 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3073 }
3074 tg3_link_report(tp);
3075 }
3076 return err;
3077}
3078
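/* Periodic check for serdes parallel detection.  If autoneg has timed
 * out but we have signal detect and are not receiving config words,
 * force a 1000FD link and mark it as parallel-detected; if config words
 * later appear on such a link, turn autonegotiation back on.
 */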
3079static void tg3_serdes_parallel_detect(struct tg3 *tp)
3080{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003081 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003082 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003083 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003084 return;
3085 }
3086 if (!netif_carrier_ok(tp->dev) &&
3087 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3088 u32 bmcr;
3089
3090 tg3_readphy(tp, MII_BMCR, &bmcr);
3091 if (bmcr & BMCR_ANENABLE) {
3092 u32 phy1, phy2;
3093
3094 /* Select shadow register 0x1f */
3095 tg3_writephy(tp, 0x1c, 0x7c00);
3096 tg3_readphy(tp, 0x1c, &phy1);
3097
3098 /* Select expansion interrupt status register */
3099 tg3_writephy(tp, 0x17, 0x0f01);
3100 tg3_readphy(tp, 0x15, &phy2);
3101 tg3_readphy(tp, 0x15, &phy2);
3102
3103 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3104 /* We have signal detect and not receiving
3105 * config code words, link is up by parallel
3106 * detection.
3107 */
3108
3109 bmcr &= ~BMCR_ANENABLE;
3110 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3111 tg3_writephy(tp, MII_BMCR, bmcr);
3112 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3113 }
3114 }
3115 }
3116 else if (netif_carrier_ok(tp->dev) &&
3117 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3118 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3119 u32 phy2;
3120
3121 /* Select expansion interrupt status register */
3122 tg3_writephy(tp, 0x17, 0x0f01);
3123 tg3_readphy(tp, 0x15, &phy2);
3124 if (phy2 & 0x20) {
3125 u32 bmcr;
3126
3127 /* Config code words received, turn on autoneg. */
3128 tg3_readphy(tp, MII_BMCR, &bmcr);
3129 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3130
3131 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3132
3133 }
3134 }
3135}
3136
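/* Dispatch link setup to the fiber, MII-serdes or copper handler, then
 * apply the common post-link fixups: the 5784 A0/A1 clock prescaler,
 * the MAC transmit slot time for half-duplex gigabit, the statistics
 * coalescing ticks, and the ASPM L1 entry threshold workaround.
 */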
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3138{
3139 int err;
3140
3141 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3142 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003143 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3144 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 } else {
3146 err = tg3_setup_copper_phy(tp, force_reset);
3147 }
3148
Matt Carlsonb5af7122007-11-12 21:22:02 -08003149 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3150 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003151 u32 val, scale;
3152
3153 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3154 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3155 scale = 65;
3156 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3157 scale = 6;
3158 else
3159 scale = 12;
3160
3161 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3162 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3163 tw32(GRC_MISC_CFG, val);
3164 }
3165
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 if (tp->link_config.active_speed == SPEED_1000 &&
3167 tp->link_config.active_duplex == DUPLEX_HALF)
3168 tw32(MAC_TX_LENGTHS,
3169 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3170 (6 << TX_LENGTHS_IPG_SHIFT) |
3171 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3172 else
3173 tw32(MAC_TX_LENGTHS,
3174 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3175 (6 << TX_LENGTHS_IPG_SHIFT) |
3176 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3177
3178 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3179 if (netif_carrier_ok(tp->dev)) {
3180 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003181 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182 } else {
3183 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3184 }
3185 }
3186
Matt Carlson8ed5d972007-05-07 00:25:49 -07003187 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3188 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3189 if (!netif_carrier_ok(tp->dev))
3190 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3191 tp->pwrmgmt_thresh;
3192 else
3193 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3194 tw32(PCIE_PWR_MGMT_THRESH, val);
3195 }
3196
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 return err;
3198}
3199
Michael Chandf3e6542006-05-26 17:48:07 -07003200/* This is called whenever we suspect that the system chipset is re-
3201 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3202 * is bogus tx completions. We try to recover by setting the
3203 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3204 * in the workqueue.
3205 */
3206static void tg3_tx_recover(struct tg3 *tp)
3207{
3208 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3209 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3210
3211 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3212 "mapped I/O cycles to the network device, attempting to "
3213 "recover. Please report the problem to the driver maintainer "
3214 "and include system chipset information.\n", tp->dev->name);
3215
3216 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003217 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003218 spin_unlock(&tp->lock);
3219}
3220
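/* Number of free TX descriptors: tx_pending minus the descriptors still
 * in flight (producer minus consumer, masked to the ring size).  The
 * memory barrier keeps the read of tx_cons fresh; see the comment above
 * the matching barrier in tg3_tx().
 */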
Michael Chan1b2a7202006-08-07 21:46:02 -07003221static inline u32 tg3_tx_avail(struct tg3 *tp)
3222{
3223 smp_mb();
3224 return (tp->tx_pending -
3225 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3226}
3227
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228/* Tigon3 never reports partial packet sends. So we do not
3229 * need special logic to handle SKBs that have not had all
3230 * of their frags sent yet, like SunGEM does.
3231 */
3232static void tg3_tx(struct tg3 *tp)
3233{
3234 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3235 u32 sw_idx = tp->tx_cons;
3236
3237 while (sw_idx != hw_idx) {
3238 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3239 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003240 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241
Michael Chandf3e6542006-05-26 17:48:07 -07003242 if (unlikely(skb == NULL)) {
3243 tg3_tx_recover(tp);
3244 return;
3245 }
3246
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 pci_unmap_single(tp->pdev,
3248 pci_unmap_addr(ri, mapping),
3249 skb_headlen(skb),
3250 PCI_DMA_TODEVICE);
3251
3252 ri->skb = NULL;
3253
3254 sw_idx = NEXT_TX(sw_idx);
3255
3256 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003258 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3259 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260
3261 pci_unmap_page(tp->pdev,
3262 pci_unmap_addr(ri, mapping),
3263 skb_shinfo(skb)->frags[i].size,
3264 PCI_DMA_TODEVICE);
3265
3266 sw_idx = NEXT_TX(sw_idx);
3267 }
3268
David S. Millerf47c11e2005-06-24 20:18:35 -07003269 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003270
3271 if (unlikely(tx_bug)) {
3272 tg3_tx_recover(tp);
3273 return;
3274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275 }
3276
3277 tp->tx_cons = sw_idx;
3278
Michael Chan1b2a7202006-08-07 21:46:02 -07003279 /* Need to make the tx_cons update visible to tg3_start_xmit()
3280 * before checking for netif_queue_stopped(). Without the
3281 * memory barrier, there is a small possibility that tg3_start_xmit()
3282 * will miss it and cause the queue to be stopped forever.
3283 */
3284 smp_mb();
3285
3286 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003287 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07003288 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003289 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003290 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07003291 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07003292 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003293 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294}
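/* Rough ordering sketch (an assumed reading of the barrier comments in
 * tg3_tx() and tg3_tx_avail(), not an authoritative description):
 *
 *	tg3_start_xmit()			tg3_tx()
 *	----------------			--------
 *	ring full: netif_stop_queue()		tp->tx_cons = sw_idx
 *	smp_mb() (inside tg3_tx_avail())	smp_mb()
 *	re-check tg3_tx_avail()			check netif_queue_stopped()
 *
 * If either barrier were missing, each side could miss the other's
 * update and the queue could remain stopped forever, which is exactly
 * the failure the comment above warns about.
 */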
3295
3296/* Returns size of skb allocated or < 0 on error.
3297 *
3298 * We only need to fill in the address because the other members
3299 * of the RX descriptor are invariant, see tg3_init_rings.
3300 *
3301 * Note the purposeful asymmetry of cpu vs. chip accesses. For
3302 * posting buffers we only dirty the first cache line of the RX
3303 * descriptor (containing the address), whereas for the RX status
3304 * buffers the cpu only reads the last cache line of the RX descriptor
3305 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3306 */
3307static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3308 int src_idx, u32 dest_idx_unmasked)
3309{
3310 struct tg3_rx_buffer_desc *desc;
3311 struct ring_info *map, *src_map;
3312 struct sk_buff *skb;
3313 dma_addr_t mapping;
3314 int skb_size, dest_idx;
3315
3316 src_map = NULL;
3317 switch (opaque_key) {
3318 case RXD_OPAQUE_RING_STD:
3319 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3320 desc = &tp->rx_std[dest_idx];
3321 map = &tp->rx_std_buffers[dest_idx];
3322 if (src_idx >= 0)
3323 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07003324 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325 break;
3326
3327 case RXD_OPAQUE_RING_JUMBO:
3328 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3329 desc = &tp->rx_jumbo[dest_idx];
3330 map = &tp->rx_jumbo_buffers[dest_idx];
3331 if (src_idx >= 0)
3332 src_map = &tp->rx_jumbo_buffers[src_idx];
3333 skb_size = RX_JUMBO_PKT_BUF_SZ;
3334 break;
3335
3336 default:
3337 return -EINVAL;
3338 }
3339
3340 /* Do not overwrite any of the map or rp information
3341 * until we are sure we can commit to a new buffer.
3342 *
3343 * Callers depend upon this behavior and assume that
3344 * we leave everything unchanged if we fail.
3345 */
David S. Millera20e9c62006-07-31 22:38:16 -07003346 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347 if (skb == NULL)
3348 return -ENOMEM;
3349
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 skb_reserve(skb, tp->rx_offset);
3351
3352 mapping = pci_map_single(tp->pdev, skb->data,
3353 skb_size - tp->rx_offset,
3354 PCI_DMA_FROMDEVICE);
3355
3356 map->skb = skb;
3357 pci_unmap_addr_set(map, mapping, mapping);
3358
3359 if (src_map != NULL)
3360 src_map->skb = NULL;
3361
3362 desc->addr_hi = ((u64)mapping >> 32);
3363 desc->addr_lo = ((u64)mapping & 0xffffffff);
3364
3365 return skb_size;
3366}
3367
3368/* We only need to move over in the address because the other
3369 * members of the RX descriptor are invariant. See notes above
3370 * tg3_alloc_rx_skb for full details.
3371 */
3372static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3373 int src_idx, u32 dest_idx_unmasked)
3374{
3375 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3376 struct ring_info *src_map, *dest_map;
3377 int dest_idx;
3378
3379 switch (opaque_key) {
3380 case RXD_OPAQUE_RING_STD:
3381 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3382 dest_desc = &tp->rx_std[dest_idx];
3383 dest_map = &tp->rx_std_buffers[dest_idx];
3384 src_desc = &tp->rx_std[src_idx];
3385 src_map = &tp->rx_std_buffers[src_idx];
3386 break;
3387
3388 case RXD_OPAQUE_RING_JUMBO:
3389 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3390 dest_desc = &tp->rx_jumbo[dest_idx];
3391 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3392 src_desc = &tp->rx_jumbo[src_idx];
3393 src_map = &tp->rx_jumbo_buffers[src_idx];
3394 break;
3395
3396 default:
3397 return;
3398 }
3399
3400 dest_map->skb = src_map->skb;
3401 pci_unmap_addr_set(dest_map, mapping,
3402 pci_unmap_addr(src_map, mapping));
3403 dest_desc->addr_hi = src_desc->addr_hi;
3404 dest_desc->addr_lo = src_desc->addr_lo;
3405
3406 src_map->skb = NULL;
3407}
3408
3409#if TG3_VLAN_TAG_USED
3410static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3411{
3412 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3413}
3414#endif
3415
3416/* The RX ring scheme is composed of multiple rings which post fresh
3417 * buffers to the chip, and one special ring the chip uses to report
3418 * status back to the host.
3419 *
3420 * The special ring reports the status of received packets to the
3421 * host. The chip does not write into the original descriptor the
3422 * RX buffer was obtained from. The chip simply takes the original
3423 * descriptor as provided by the host, updates the status and length
3424 * field, then writes this into the next status ring entry.
3425 *
3426 * Each ring the host uses to post buffers to the chip is described
3427 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
3428 * it is first placed into the on-chip ram. When the packet's length
3429 * is known, it walks down the TG3_BDINFO entries to select the ring.
3430 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3431 * whose MAXLEN covers the new packet's length is chosen.
3432 *
3433 * The "separate ring for rx status" scheme may sound queer, but it makes
3434 * sense from a cache coherency perspective. If only the host writes
3435 * to the buffer post rings, and only the chip writes to the rx status
3436 * rings, then cache lines never move beyond shared-modified state.
3437 * If both the host and chip were to write into the same ring, cache line
3438 * eviction could occur since both entities want it in an exclusive state.
3439 */
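/* A compressed picture of the scheme described above (illustration
 * only, no new information):
 *
 *	host --- writes buffer addresses ---> std / jumbo posting rings
 *	                                               |
 *	                                   chip DMAs packet, picks ring
 *	                                   by walking TG3_BDINFO MAXLENs
 *	                                               v
 *	host <--- reads status/length ------- rx return (status) ring
 *
 * The host only writes the posting rings and only reads the return
 * ring, which is the cache-friendly split the last paragraph explains.
 */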
3440static int tg3_rx(struct tg3 *tp, int budget)
3441{
Michael Chanf92905d2006-06-29 20:14:29 -07003442 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07003443 u32 sw_idx = tp->rx_rcb_ptr;
3444 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 int received;
3446
3447 hw_idx = tp->hw_status->idx[0].rx_producer;
3448 /*
3449 * We need to order the read of hw_idx and the read of
3450 * the opaque cookie.
3451 */
3452 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 work_mask = 0;
3454 received = 0;
3455 while (sw_idx != hw_idx && budget > 0) {
3456 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3457 unsigned int len;
3458 struct sk_buff *skb;
3459 dma_addr_t dma_addr;
3460 u32 opaque_key, desc_idx, *post_ptr;
3461
3462 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3463 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3464 if (opaque_key == RXD_OPAQUE_RING_STD) {
3465 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3466 mapping);
3467 skb = tp->rx_std_buffers[desc_idx].skb;
3468 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07003469 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3471 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3472 mapping);
3473 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3474 post_ptr = &tp->rx_jumbo_ptr;
3475 }
3476 else {
3477 goto next_pkt_nopost;
3478 }
3479
3480 work_mask |= opaque_key;
3481
3482 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3483 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3484 drop_it:
3485 tg3_recycle_rx(tp, opaque_key,
3486 desc_idx, *post_ptr);
3487 drop_it_no_recycle:
3488 /* Other statistics kept track of by card. */
3489 tp->net_stats.rx_dropped++;
3490 goto next_pkt;
3491 }
3492
3493 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3494
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003495 if (len > RX_COPY_THRESHOLD
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 && tp->rx_offset == 2
3497 /* rx_offset != 2 iff this is a 5701 card running
3498 * in PCI-X mode [see tg3_get_invariants()] */
3499 ) {
3500 int skb_size;
3501
3502 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3503 desc_idx, *post_ptr);
3504 if (skb_size < 0)
3505 goto drop_it;
3506
3507 pci_unmap_single(tp->pdev, dma_addr,
3508 skb_size - tp->rx_offset,
3509 PCI_DMA_FROMDEVICE);
3510
3511 skb_put(skb, len);
3512 } else {
3513 struct sk_buff *copy_skb;
3514
3515 tg3_recycle_rx(tp, opaque_key,
3516 desc_idx, *post_ptr);
3517
David S. Millera20e9c62006-07-31 22:38:16 -07003518 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 if (copy_skb == NULL)
3520 goto drop_it_no_recycle;
3521
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 skb_reserve(copy_skb, 2);
3523 skb_put(copy_skb, len);
3524 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03003525 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3527
3528 /* We'll reuse the original ring buffer. */
3529 skb = copy_skb;
3530 }
3531
3532 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3533 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3534 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3535 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3536 skb->ip_summed = CHECKSUM_UNNECESSARY;
3537 else
3538 skb->ip_summed = CHECKSUM_NONE;
3539
3540 skb->protocol = eth_type_trans(skb, tp->dev);
3541#if TG3_VLAN_TAG_USED
3542 if (tp->vlgrp != NULL &&
3543 desc->type_flags & RXD_FLAG_VLAN) {
3544 tg3_vlan_rx(tp, skb,
3545 desc->err_vlan & RXD_VLAN_MASK);
3546 } else
3547#endif
3548 netif_receive_skb(skb);
3549
3550 tp->dev->last_rx = jiffies;
3551 received++;
3552 budget--;
3553
3554next_pkt:
3555 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07003556
3557 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3558 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3559
3560 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3561 TG3_64BIT_REG_LOW, idx);
3562 work_mask &= ~RXD_OPAQUE_RING_STD;
3563 rx_std_posted = 0;
3564 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07003566 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08003567 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07003568
3569 /* Refresh hw_idx to see if there is new work */
3570 if (sw_idx == hw_idx) {
3571 hw_idx = tp->hw_status->idx[0].rx_producer;
3572 rmb();
3573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574 }
3575
3576 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07003577 tp->rx_rcb_ptr = sw_idx;
3578 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579
3580 /* Refill RX ring(s). */
3581 if (work_mask & RXD_OPAQUE_RING_STD) {
3582 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3583 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3584 sw_idx);
3585 }
3586 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3587 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3588 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3589 sw_idx);
3590 }
3591 mmiowb();
3592
3593 return received;
3594}
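/* Summary of the copy-vs-flip decision inside tg3_rx() above: frames
 * longer than RX_COPY_THRESHOLD have their ring buffer unmapped and
 * handed straight up the stack, with a fresh skb posted in its place
 * by tg3_alloc_rx_skb(); shorter frames are copied into a small newly
 * allocated skb and the original ring buffer is recycled in place via
 * tg3_recycle_rx().
 */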
3595
David S. Miller6f535762007-10-11 18:08:29 -07003596static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 /* handle link change and other phy events */
3601 if (!(tp->tg3_flags &
3602 (TG3_FLAG_USE_LINKCHG_REG |
3603 TG3_FLAG_POLL_SERDES))) {
3604 if (sblk->status & SD_STATUS_LINK_CHG) {
3605 sblk->status = SD_STATUS_UPDATED |
3606 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07003607 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07003609 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 }
3611 }
3612
3613 /* run TX completion thread */
3614 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07003616 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07003617 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 }
3619
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 /* run RX thread, within the bounds set by NAPI.
3621 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003622 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003624 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07003625 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626
David S. Miller6f535762007-10-11 18:08:29 -07003627 return work_done;
3628}
David S. Millerf7383c22005-05-18 22:50:53 -07003629
David S. Miller6f535762007-10-11 18:08:29 -07003630static int tg3_poll(struct napi_struct *napi, int budget)
3631{
3632 struct tg3 *tp = container_of(napi, struct tg3, napi);
3633 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07003634 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07003635
3636 while (1) {
3637 work_done = tg3_poll_work(tp, work_done, budget);
3638
3639 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3640 goto tx_recovery;
3641
3642 if (unlikely(work_done >= budget))
3643 break;
3644
Michael Chan4fd7ab52007-10-12 01:39:50 -07003645 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3646 /* tp->last_tag is used in tg3_restart_ints() below
3647 * to tell the hw how much work has been processed,
3648 * so we must read it before checking for more work.
3649 */
3650 tp->last_tag = sblk->status_tag;
3651 rmb();
3652 } else
3653 sblk->status &= ~SD_STATUS_UPDATED;
3654
David S. Miller6f535762007-10-11 18:08:29 -07003655 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07003656 netif_rx_complete(tp->dev, napi);
3657 tg3_restart_ints(tp);
3658 break;
3659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 }
3661
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003662 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07003663
3664tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07003665 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07003666 netif_rx_complete(tp->dev, napi);
3667 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07003668 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669}
3670
David S. Millerf47c11e2005-06-24 20:18:35 -07003671static void tg3_irq_quiesce(struct tg3 *tp)
3672{
3673 BUG_ON(tp->irq_sync);
3674
3675 tp->irq_sync = 1;
3676 smp_mb();
3677
3678 synchronize_irq(tp->pdev->irq);
3679}
3680
3681static inline int tg3_irq_sync(struct tg3 *tp)
3682{
3683 return tp->irq_sync;
3684}
3685
3686/* Fully shutdown all tg3 driver activity elsewhere in the system.
3687 * If irq_sync is non-zero, then we must also synchronize with the
3688 * IRQ handler. Most of the time, this is not necessary except when
3689 * shutting down the device.
3690 */
3691static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3692{
Michael Chan46966542007-07-11 19:47:19 -07003693 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07003694 if (irq_sync)
3695 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07003696}
3697
3698static inline void tg3_full_unlock(struct tg3 *tp)
3699{
David S. Millerf47c11e2005-06-24 20:18:35 -07003700 spin_unlock_bh(&tp->lock);
3701}
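/* Typical use of the two helpers above, as seen in e.g. tg3_change_mtu()
 * later in this file (sketch only):
 *
 *	tg3_full_lock(tp, 1);	irq_sync != 0 also quiesces the IRQ handler
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	... reconfigure ...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 */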
3702
Michael Chanfcfa0a32006-03-20 22:28:41 -08003703/* One-shot MSI handler - Chip automatically disables interrupt
3704 * after sending MSI so driver doesn't have to do it.
3705 */
David Howells7d12e782006-10-05 14:55:46 +01003706static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08003707{
3708 struct net_device *dev = dev_id;
3709 struct tg3 *tp = netdev_priv(dev);
3710
3711 prefetch(tp->hw_status);
3712 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3713
3714 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003715 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08003716
3717 return IRQ_HANDLED;
3718}
3719
Michael Chan88b06bc22005-04-21 17:13:25 -07003720/* MSI ISR - No need to check for interrupt sharing and no need to
3721 * flush status block and interrupt mailbox. PCI ordering rules
3722 * guarantee that MSI will arrive after the status block.
3723 */
David Howells7d12e782006-10-05 14:55:46 +01003724static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc22005-04-21 17:13:25 -07003725{
3726 struct net_device *dev = dev_id;
3727 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc22005-04-21 17:13:25 -07003728
Michael Chan61487482005-09-05 17:53:19 -07003729 prefetch(tp->hw_status);
3730 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc22005-04-21 17:13:25 -07003731 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003732 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc22005-04-21 17:13:25 -07003733 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07003734 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc22005-04-21 17:13:25 -07003735 * NIC to stop sending us irqs, engaging "in-intr-handler"
3736 * event coalescing.
3737 */
3738 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07003739 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003740 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07003741
Michael Chan88b06bc22005-04-21 17:13:25 -07003742 return IRQ_RETVAL(1);
3743}
3744
David Howells7d12e782006-10-05 14:55:46 +01003745static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746{
3747 struct net_device *dev = dev_id;
3748 struct tg3 *tp = netdev_priv(dev);
3749 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750 unsigned int handled = 1;
3751
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752 /* In INTx mode, it is possible for the interrupt to arrive at
3753 * the CPU before the status block posted prior to the interrupt has arrived.
3754 * Reading the PCI State register will confirm whether the
3755 * interrupt is ours and will flush the status block.
3756 */
Michael Chand18edcb2007-03-24 20:57:11 -07003757 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3758 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3759 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3760 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07003761 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07003762 }
Michael Chand18edcb2007-03-24 20:57:11 -07003763 }
3764
3765 /*
3766 * Writing any value to intr-mbox-0 clears PCI INTA# and
3767 * chip-internal interrupt pending events.
3768 * Writing non-zero to intr-mbox-0 additionally tells the
3769 * NIC to stop sending us irqs, engaging "in-intr-handler"
3770 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07003771 *
3772 * Flush the mailbox to de-assert the IRQ immediately to prevent
3773 * spurious interrupts. The flush impacts performance but
3774 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07003775 */
Michael Chanc04cb342007-05-07 00:26:15 -07003776 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07003777 if (tg3_irq_sync(tp))
3778 goto out;
3779 sblk->status &= ~SD_STATUS_UPDATED;
3780 if (likely(tg3_has_work(tp))) {
3781 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003782 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07003783 } else {
3784 /* No work, shared interrupt perhaps? re-enable
3785 * interrupts, and flush that PCI write
3786 */
3787 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3788 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07003789 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003790out:
David S. Millerfac9b832005-05-18 22:46:34 -07003791 return IRQ_RETVAL(handled);
3792}
3793
David Howells7d12e782006-10-05 14:55:46 +01003794static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07003795{
3796 struct net_device *dev = dev_id;
3797 struct tg3 *tp = netdev_priv(dev);
3798 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07003799 unsigned int handled = 1;
3800
David S. Millerfac9b832005-05-18 22:46:34 -07003801 /* In INTx mode, it is possible for the interrupt to arrive at
3802 * the CPU before the status block posted prior to the interrupt has arrived.
3803 * Reading the PCI State register will confirm whether the
3804 * interrupt is ours and will flush the status block.
3805 */
Michael Chand18edcb2007-03-24 20:57:11 -07003806 if (unlikely(sblk->status_tag == tp->last_tag)) {
3807 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3808 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3809 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07003810 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811 }
Michael Chand18edcb2007-03-24 20:57:11 -07003812 }
3813
3814 /*
3815 * writing any value to intr-mbox-0 clears PCI INTA# and
3816 * chip-internal interrupt pending events.
3817 * writing non-zero to intr-mbox-0 additionally tells the
3818 * NIC to stop sending us irqs, engaging "in-intr-handler"
3819 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07003820 *
3821 * Flush the mailbox to de-assert the IRQ immediately to prevent
3822 * spurious interrupts. The flush impacts performance but
3823 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07003824 */
Michael Chanc04cb342007-05-07 00:26:15 -07003825 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07003826 if (tg3_irq_sync(tp))
3827 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003828 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07003829 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3830 /* Update last_tag to mark that this status has been
3831 * seen. Because interrupt may be shared, we may be
3832 * racing with tg3_poll(), so only update last_tag
3833 * if tg3_poll() is not scheduled.
3834 */
3835 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003836 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837 }
David S. Millerf47c11e2005-06-24 20:18:35 -07003838out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 return IRQ_RETVAL(handled);
3840}
3841
Michael Chan79381092005-04-21 17:13:59 -07003842/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01003843static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07003844{
3845 struct net_device *dev = dev_id;
3846 struct tg3 *tp = netdev_priv(dev);
3847 struct tg3_hw_status *sblk = tp->hw_status;
3848
Michael Chanf9804dd2005-09-27 12:13:10 -07003849 if ((sblk->status & SD_STATUS_UPDATED) ||
3850 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07003851 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07003852 return IRQ_RETVAL(1);
3853 }
3854 return IRQ_RETVAL(0);
3855}
3856
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07003857static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07003858static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859
Michael Chanb9ec6c12006-07-25 16:37:27 -07003860/* Restart hardware after configuration changes, self-test, etc.
3861 * Invoked with tp->lock held.
3862 */
3863static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3864{
3865 int err;
3866
3867 err = tg3_init_hw(tp, reset_phy);
3868 if (err) {
3869 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3870 "aborting.\n", tp->dev->name);
3871 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3872 tg3_full_unlock(tp);
3873 del_timer_sync(&tp->timer);
3874 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003875 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07003876 dev_close(tp->dev);
3877 tg3_full_lock(tp, 0);
3878 }
3879 return err;
3880}
3881
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882#ifdef CONFIG_NET_POLL_CONTROLLER
3883static void tg3_poll_controller(struct net_device *dev)
3884{
Michael Chan88b06bc22005-04-21 17:13:25 -07003885 struct tg3 *tp = netdev_priv(dev);
3886
David Howells7d12e782006-10-05 14:55:46 +01003887 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888}
3889#endif
3890
David Howellsc4028952006-11-22 14:57:56 +00003891static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892{
David Howellsc4028952006-11-22 14:57:56 +00003893 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894 unsigned int restart_timer;
3895
Michael Chan7faa0062006-02-02 17:29:28 -08003896 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08003897
3898 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08003899 tg3_full_unlock(tp);
3900 return;
3901 }
3902
3903 tg3_full_unlock(tp);
3904
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 tg3_netif_stop(tp);
3906
David S. Millerf47c11e2005-06-24 20:18:35 -07003907 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908
3909 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3910 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3911
Michael Chandf3e6542006-05-26 17:48:07 -07003912 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3913 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3914 tp->write32_rx_mbox = tg3_write_flush_reg32;
3915 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3916 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3917 }
3918
Michael Chan944d9802005-05-29 14:57:48 -07003919 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Michael Chanb9ec6c12006-07-25 16:37:27 -07003920 if (tg3_init_hw(tp, 1))
3921 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922
3923 tg3_netif_start(tp);
3924
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925 if (restart_timer)
3926 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08003927
Michael Chanb9ec6c12006-07-25 16:37:27 -07003928out:
Michael Chan7faa0062006-02-02 17:29:28 -08003929 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930}
3931
Michael Chanb0408752007-02-13 12:18:30 -08003932static void tg3_dump_short_state(struct tg3 *tp)
3933{
3934 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3935 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3936 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3937 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3938}
3939
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940static void tg3_tx_timeout(struct net_device *dev)
3941{
3942 struct tg3 *tp = netdev_priv(dev);
3943
Michael Chanb0408752007-02-13 12:18:30 -08003944 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08003945 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3946 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08003947 tg3_dump_short_state(tp);
3948 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949
3950 schedule_work(&tp->reset_task);
3951}
3952
Michael Chanc58ec932005-09-17 00:46:27 -07003953/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3954static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3955{
3956 u32 base = (u32) mapping & 0xffffffff;
3957
3958 return ((base > 0xffffdcc0) &&
3959 (base + len + 8 < base));
3960}
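/* Worked example for the check above (the addresses are hypothetical):
 * a buffer mapped at 0xfffff000 with len = 0x2000 has base > 0xffffdcc0
 * and base + len + 8 = 0x1008 after 32-bit truncation, which is less
 * than base, so the buffer is flagged as crossing a 4GB boundary.  A
 * buffer at 0x7ffff000 with the same length fails the first test and
 * is accepted.
 */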
3961
Michael Chan72f2afb2006-03-06 19:28:35 -08003962/* Test for DMA addresses > 40-bit */
3963static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3964 int len)
3965{
3966#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08003967 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08003968 return (((u64) mapping + len) > DMA_40BIT_MASK);
3969 return 0;
3970#else
3971 return 0;
3972#endif
3973}
3974
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3976
Michael Chan72f2afb2006-03-06 19:28:35 -08003977/* Work around 4GB and 40-bit hardware DMA bugs. */
3978static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07003979 u32 last_plus_one, u32 *start,
3980 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981{
3982 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
Michael Chanc58ec932005-09-17 00:46:27 -07003983 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07003985 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986
3987 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07003988 ret = -1;
3989 } else {
3990 /* New SKB is guaranteed to be linear. */
3991 entry = *start;
3992 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3993 PCI_DMA_TODEVICE);
3994 /* Make sure new skb does not cross any 4G boundaries.
3995 * Drop the packet if it does.
3996 */
3997 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3998 ret = -1;
3999 dev_kfree_skb(new_skb);
4000 new_skb = NULL;
4001 } else {
4002 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4003 base_flags, 1 | (mss << 1));
4004 *start = NEXT_TX(entry);
4005 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 }
4007
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 /* Now clean up the sw ring entries. */
4009 i = 0;
4010 while (entry != last_plus_one) {
4011 int len;
4012
4013 if (i == 0)
4014 len = skb_headlen(skb);
4015 else
4016 len = skb_shinfo(skb)->frags[i-1].size;
4017 pci_unmap_single(tp->pdev,
4018 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4019 len, PCI_DMA_TODEVICE);
4020 if (i == 0) {
4021 tp->tx_buffers[entry].skb = new_skb;
4022 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4023 } else {
4024 tp->tx_buffers[entry].skb = NULL;
4025 }
4026 entry = NEXT_TX(entry);
4027 i++;
4028 }
4029
4030 dev_kfree_skb(skb);
4031
Michael Chanc58ec932005-09-17 00:46:27 -07004032 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033}
4034
4035static void tg3_set_txd(struct tg3 *tp, int entry,
4036 dma_addr_t mapping, int len, u32 flags,
4037 u32 mss_and_is_end)
4038{
4039 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4040 int is_end = (mss_and_is_end & 0x1);
4041 u32 mss = (mss_and_is_end >> 1);
4042 u32 vlan_tag = 0;
4043
4044 if (is_end)
4045 flags |= TXD_FLAG_END;
4046 if (flags & TXD_FLAG_VLAN) {
4047 vlan_tag = flags >> 16;
4048 flags &= 0xffff;
4049 }
4050 vlan_tag |= (mss << TXD_MSS_SHIFT);
4051
4052 txd->addr_hi = ((u64) mapping >> 32);
4053 txd->addr_lo = ((u64) mapping & 0xffffffff);
4054 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4055 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4056}
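/* Sketch of how callers pack the last argument (inferred from the code
 * above and its call sites, not from hardware documentation): bit 0
 * carries the "last fragment" flag that becomes TXD_FLAG_END, and the
 * MSS is shifted up by one, e.g.
 *
 *	tg3_set_txd(tp, entry, mapping, len, base_flags,
 *		    (i == last) | (mss << 1));
 *
 * tg3_set_txd() then splits the value back apart and folds the MSS into
 * the descriptor's vlan_tag word via TXD_MSS_SHIFT.
 */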
4057
Michael Chan5a6f3072006-03-20 22:28:05 -08004058/* hard_start_xmit for devices that don't have any bugs and
4059 * support TG3_FLG2_HW_TSO_2 only.
4060 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4062{
4063 struct tg3 *tp = netdev_priv(dev);
4064 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 u32 len, entry, base_flags, mss;
Michael Chan5a6f3072006-03-20 22:28:05 -08004066
4067 len = skb_headlen(skb);
4068
Michael Chan00b70502006-06-17 21:58:45 -07004069 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004070 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004071 * interrupt. Furthermore, IRQ processing runs lockless so we have
4072 * no IRQ context deadlocks to worry about either. Rejoice!
4073 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004074 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004075 if (!netif_queue_stopped(dev)) {
4076 netif_stop_queue(dev);
4077
4078 /* This is a hard error, log it. */
4079 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4080 "queue awake!\n", dev->name);
4081 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004082 return NETDEV_TX_BUSY;
4083 }
4084
4085 entry = tp->tx_prod;
4086 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004087 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004088 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004089 int tcp_opt_len, ip_tcp_len;
4090
4091 if (skb_header_cloned(skb) &&
4092 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4093 dev_kfree_skb(skb);
4094 goto out_unlock;
4095 }
4096
Michael Chanb0026622006-07-03 19:42:14 -07004097 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4098 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4099 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004100 struct iphdr *iph = ip_hdr(skb);
4101
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004102 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004103 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004104
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004105 iph->check = 0;
4106 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004107 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4108 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004109
4110 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4111 TXD_FLAG_CPU_POST_DMA);
4112
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004113 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004114
Michael Chan5a6f3072006-03-20 22:28:05 -08004115 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004116 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004117 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004118#if TG3_VLAN_TAG_USED
4119 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4120 base_flags |= (TXD_FLAG_VLAN |
4121 (vlan_tx_tag_get(skb) << 16));
4122#endif
4123
4124 /* Queue skb data, a.k.a. the main skb fragment. */
4125 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4126
4127 tp->tx_buffers[entry].skb = skb;
4128 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4129
4130 tg3_set_txd(tp, entry, mapping, len, base_flags,
4131 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4132
4133 entry = NEXT_TX(entry);
4134
4135 /* Now loop through additional data fragments, and queue them. */
4136 if (skb_shinfo(skb)->nr_frags > 0) {
4137 unsigned int i, last;
4138
4139 last = skb_shinfo(skb)->nr_frags - 1;
4140 for (i = 0; i <= last; i++) {
4141 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4142
4143 len = frag->size;
4144 mapping = pci_map_page(tp->pdev,
4145 frag->page,
4146 frag->page_offset,
4147 len, PCI_DMA_TODEVICE);
4148
4149 tp->tx_buffers[entry].skb = NULL;
4150 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4151
4152 tg3_set_txd(tp, entry, mapping, len,
4153 base_flags, (i == last) | (mss << 1));
4154
4155 entry = NEXT_TX(entry);
4156 }
4157 }
4158
4159 /* Packets are ready, update Tx producer idx local and on card. */
4160 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4161
4162 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004163 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004164 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004165 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004166 netif_wake_queue(tp->dev);
4167 }
4168
4169out_unlock:
4170 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004171
4172 dev->trans_start = jiffies;
4173
4174 return NETDEV_TX_OK;
4175}
4176
Michael Chan52c0fd82006-06-29 20:15:54 -07004177static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4178
4179/* Use GSO to work around a rare TSO bug that may be triggered when the
4180 * TSO header is greater than 80 bytes.
4181 */
4182static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4183{
4184 struct sk_buff *segs, *nskb;
4185
4186 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004187 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004188 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004189 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4190 return NETDEV_TX_BUSY;
4191
4192 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004193 }
4194
4195 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4196 if (unlikely(IS_ERR(segs)))
4197 goto tg3_tso_bug_end;
4198
4199 do {
4200 nskb = segs;
4201 segs = segs->next;
4202 nskb->next = NULL;
4203 tg3_start_xmit_dma_bug(nskb, tp->dev);
4204 } while (segs);
4205
4206tg3_tso_bug_end:
4207 dev_kfree_skb(skb);
4208
4209 return NETDEV_TX_OK;
4210}
Michael Chan52c0fd82006-06-29 20:15:54 -07004211
Michael Chan5a6f3072006-03-20 22:28:05 -08004212/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4213 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4214 */
4215static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4216{
4217 struct tg3 *tp = netdev_priv(dev);
4218 dma_addr_t mapping;
4219 u32 len, entry, base_flags, mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 int would_hit_hwbug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221
4222 len = skb_headlen(skb);
4223
Michael Chan00b70502006-06-17 21:58:45 -07004224 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004225 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004226 * interrupt. Furthermore, IRQ processing runs lockless so we have
4227 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004229 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004230 if (!netif_queue_stopped(dev)) {
4231 netif_stop_queue(dev);
4232
4233 /* This is a hard error, log it. */
4234 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4235 "queue awake!\n", dev->name);
4236 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 return NETDEV_TX_BUSY;
4238 }
4239
4240 entry = tp->tx_prod;
4241 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004242 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004245 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004246 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07004247 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
4249 if (skb_header_cloned(skb) &&
4250 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4251 dev_kfree_skb(skb);
4252 goto out_unlock;
4253 }
4254
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004255 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004256 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
Michael Chan52c0fd82006-06-29 20:15:54 -07004258 hdr_len = ip_tcp_len + tcp_opt_len;
4259 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08004260 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07004261 return (tg3_tso_bug(tp, skb));
4262
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4264 TXD_FLAG_CPU_POST_DMA);
4265
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004266 iph = ip_hdr(skb);
4267 iph->check = 0;
4268 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004270 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004271 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004272 } else
4273 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4274 iph->daddr, 0,
4275 IPPROTO_TCP,
4276 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277
4278 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4279 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004280 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281 int tsflags;
4282
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004283 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 mss |= (tsflags << 11);
4285 }
4286 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004287 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288 int tsflags;
4289
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004290 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 base_flags |= tsflags << 12;
4292 }
4293 }
4294 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295#if TG3_VLAN_TAG_USED
4296 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4297 base_flags |= (TXD_FLAG_VLAN |
4298 (vlan_tx_tag_get(skb) << 16));
4299#endif
4300
4301 /* Queue skb data, a.k.a. the main skb fragment. */
4302 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4303
4304 tp->tx_buffers[entry].skb = skb;
4305 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4306
4307 would_hit_hwbug = 0;
4308
4309 if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07004310 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311
4312 tg3_set_txd(tp, entry, mapping, len, base_flags,
4313 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4314
4315 entry = NEXT_TX(entry);
4316
4317 /* Now loop through additional data fragments, and queue them. */
4318 if (skb_shinfo(skb)->nr_frags > 0) {
4319 unsigned int i, last;
4320
4321 last = skb_shinfo(skb)->nr_frags - 1;
4322 for (i = 0; i <= last; i++) {
4323 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4324
4325 len = frag->size;
4326 mapping = pci_map_page(tp->pdev,
4327 frag->page,
4328 frag->page_offset,
4329 len, PCI_DMA_TODEVICE);
4330
4331 tp->tx_buffers[entry].skb = NULL;
4332 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4333
Michael Chanc58ec932005-09-17 00:46:27 -07004334 if (tg3_4g_overflow_test(mapping, len))
4335 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336
Michael Chan72f2afb2006-03-06 19:28:35 -08004337 if (tg3_40bit_overflow_test(tp, mapping, len))
4338 would_hit_hwbug = 1;
4339
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4341 tg3_set_txd(tp, entry, mapping, len,
4342 base_flags, (i == last)|(mss << 1));
4343 else
4344 tg3_set_txd(tp, entry, mapping, len,
4345 base_flags, (i == last));
4346
4347 entry = NEXT_TX(entry);
4348 }
4349 }
4350
4351 if (would_hit_hwbug) {
4352 u32 last_plus_one = entry;
4353 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354
Michael Chanc58ec932005-09-17 00:46:27 -07004355 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4356 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357
4358 /* If the workaround fails due to memory/mapping
4359 * failure, silently drop this packet.
4360 */
Michael Chan72f2afb2006-03-06 19:28:35 -08004361 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07004362 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363 goto out_unlock;
4364
4365 entry = start;
4366 }
4367
4368 /* Packets are ready, update Tx producer idx local and on card. */
4369 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4370
4371 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004372 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004374 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07004375 netif_wake_queue(tp->dev);
4376 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
4378out_unlock:
4379 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380
4381 dev->trans_start = jiffies;
4382
4383 return NETDEV_TX_OK;
4384}
4385
4386static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4387 int new_mtu)
4388{
4389 dev->mtu = new_mtu;
4390
Michael Chanef7f5ec2005-07-25 12:32:25 -07004391 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07004392 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07004393 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4394 ethtool_op_set_tso(dev, 0);
4395 }
4396 else
4397 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4398 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07004399 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07004400 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07004401 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07004402 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403}
4404
4405static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4406{
4407 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004408 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409
4410 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4411 return -EINVAL;
4412
4413 if (!netif_running(dev)) {
4414 /* We'll just catch it later when the
4415 * device is brought up.
4416 */
4417 tg3_set_mtu(dev, tp, new_mtu);
4418 return 0;
4419 }
4420
4421 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004422
4423 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424
Michael Chan944d9802005-05-29 14:57:48 -07004425 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426
4427 tg3_set_mtu(dev, tp, new_mtu);
4428
Michael Chanb9ec6c12006-07-25 16:37:27 -07004429 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004430
Michael Chanb9ec6c12006-07-25 16:37:27 -07004431 if (!err)
4432 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433
David S. Millerf47c11e2005-06-24 20:18:35 -07004434 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435
Michael Chanb9ec6c12006-07-25 16:37:27 -07004436 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437}
4438
4439/* Free up pending packets in all rx/tx rings.
4440 *
4441 * The chip has been shut down and the driver detached from
4442 * the networking layer, so no interrupts or new tx packets will
4443 * end up in the driver. tp->{tx,}lock is not held and we are not
4444 * in an interrupt context and thus may sleep.
4445 */
4446static void tg3_free_rings(struct tg3 *tp)
4447{
4448 struct ring_info *rxp;
4449 int i;
4450
4451 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4452 rxp = &tp->rx_std_buffers[i];
4453
4454 if (rxp->skb == NULL)
4455 continue;
4456 pci_unmap_single(tp->pdev,
4457 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07004458 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459 PCI_DMA_FROMDEVICE);
4460 dev_kfree_skb_any(rxp->skb);
4461 rxp->skb = NULL;
4462 }
4463
4464 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4465 rxp = &tp->rx_jumbo_buffers[i];
4466
4467 if (rxp->skb == NULL)
4468 continue;
4469 pci_unmap_single(tp->pdev,
4470 pci_unmap_addr(rxp, mapping),
4471 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4472 PCI_DMA_FROMDEVICE);
4473 dev_kfree_skb_any(rxp->skb);
4474 rxp->skb = NULL;
4475 }
4476
4477 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4478 struct tx_ring_info *txp;
4479 struct sk_buff *skb;
4480 int j;
4481
4482 txp = &tp->tx_buffers[i];
4483 skb = txp->skb;
4484
4485 if (skb == NULL) {
4486 i++;
4487 continue;
4488 }
4489
4490 pci_unmap_single(tp->pdev,
4491 pci_unmap_addr(txp, mapping),
4492 skb_headlen(skb),
4493 PCI_DMA_TODEVICE);
4494 txp->skb = NULL;
4495
4496 i++;
4497
4498 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4499 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4500 pci_unmap_page(tp->pdev,
4501 pci_unmap_addr(txp, mapping),
4502 skb_shinfo(skb)->frags[j].size,
4503 PCI_DMA_TODEVICE);
4504 i++;
4505 }
4506
4507 dev_kfree_skb_any(skb);
4508 }
4509}
4510
4511/* Initialize tx/rx rings for packet processing.
4512 *
4513 * The chip has been shut down and the driver detached from
4514 * the networking layer, so no interrupts or new tx packets will
4515 * end up in the driver. tp->{tx,}lock are held and thus
4516 * we may not sleep.
4517 */
Michael Chan32d8c572006-07-25 16:38:29 -07004518static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519{
4520 u32 i;
4521
4522 /* Free up all the SKBs. */
4523 tg3_free_rings(tp);
4524
4525 /* Zero out all descriptors. */
4526 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4527 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4528 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4529 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4530
Michael Chan7e72aad2005-07-25 12:31:17 -07004531 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07004532 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07004533 (tp->dev->mtu > ETH_DATA_LEN))
4534 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4535
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536 /* Initialize invariants of the rings; we only set this
4537 * stuff once. This works because the card does not
4538 * write into the rx buffer posting rings.
4539 */
4540 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4541 struct tg3_rx_buffer_desc *rxd;
4542
4543 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07004544 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004545 << RXD_LEN_SHIFT;
4546 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4547 rxd->opaque = (RXD_OPAQUE_RING_STD |
4548 (i << RXD_OPAQUE_INDEX_SHIFT));
4549 }
4550
Michael Chan0f893dc2005-07-25 12:30:38 -07004551 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4553 struct tg3_rx_buffer_desc *rxd;
4554
4555 rxd = &tp->rx_jumbo[i];
4556 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4557 << RXD_LEN_SHIFT;
4558 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4559 RXD_FLAG_JUMBO;
4560 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4561 (i << RXD_OPAQUE_INDEX_SHIFT));
4562 }
4563 }
4564
4565 /* Now allocate fresh SKBs for each rx ring. */
4566 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07004567 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4568 printk(KERN_WARNING PFX
4569 "%s: Using a smaller RX standard ring, "
4570 "only %d out of %d buffers were allocated "
4571 "successfully.\n",
4572 tp->dev->name, i, tp->rx_pending);
4573 if (i == 0)
4574 return -ENOMEM;
4575 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576 break;
Michael Chan32d8c572006-07-25 16:38:29 -07004577 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578 }
4579
Michael Chan0f893dc2005-07-25 12:30:38 -07004580 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4582 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07004583 -1, i) < 0) {
4584 printk(KERN_WARNING PFX
4585 "%s: Using a smaller RX jumbo ring, "
4586 "only %d out of %d buffers were "
4587 "allocated successfully.\n",
4588 tp->dev->name, i, tp->rx_jumbo_pending);
4589 if (i == 0) {
4590 tg3_free_rings(tp);
4591 return -ENOMEM;
4592 }
4593 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594 break;
Michael Chan32d8c572006-07-25 16:38:29 -07004595 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 }
4597 }
Michael Chan32d8c572006-07-25 16:38:29 -07004598 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599}
4600
4601/*
4602 * Must not be invoked with interrupt sources disabled and
4603 * the hardware shutdown down.
4604 */
4605static void tg3_free_consistent(struct tg3 *tp)
4606{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04004607 kfree(tp->rx_std_buffers);
4608 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 if (tp->rx_std) {
4610 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4611 tp->rx_std, tp->rx_std_mapping);
4612 tp->rx_std = NULL;
4613 }
4614 if (tp->rx_jumbo) {
4615 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4616 tp->rx_jumbo, tp->rx_jumbo_mapping);
4617 tp->rx_jumbo = NULL;
4618 }
4619 if (tp->rx_rcb) {
4620 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4621 tp->rx_rcb, tp->rx_rcb_mapping);
4622 tp->rx_rcb = NULL;
4623 }
4624 if (tp->tx_ring) {
4625 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4626 tp->tx_ring, tp->tx_desc_mapping);
4627 tp->tx_ring = NULL;
4628 }
4629 if (tp->hw_status) {
4630 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4631 tp->hw_status, tp->status_mapping);
4632 tp->hw_status = NULL;
4633 }
4634 if (tp->hw_stats) {
4635 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4636 tp->hw_stats, tp->stats_mapping);
4637 tp->hw_stats = NULL;
4638 }
4639}
4640
4641/*
4642 * Must not be invoked with interrupt sources disabled and
4643 * the hardware shut down. Can sleep.
4644 */
4645static int tg3_alloc_consistent(struct tg3 *tp)
4646{
Yan Burmanbd2b3342006-12-14 15:25:00 -08004647 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648 (TG3_RX_RING_SIZE +
4649 TG3_RX_JUMBO_RING_SIZE)) +
4650 (sizeof(struct tx_ring_info) *
4651 TG3_TX_RING_SIZE),
4652 GFP_KERNEL);
4653 if (!tp->rx_std_buffers)
4654 return -ENOMEM;
4655
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4657 tp->tx_buffers = (struct tx_ring_info *)
4658 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4659
4660 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4661 &tp->rx_std_mapping);
4662 if (!tp->rx_std)
4663 goto err_out;
4664
4665 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4666 &tp->rx_jumbo_mapping);
4667
4668 if (!tp->rx_jumbo)
4669 goto err_out;
4670
4671 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4672 &tp->rx_rcb_mapping);
4673 if (!tp->rx_rcb)
4674 goto err_out;
4675
4676 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4677 &tp->tx_desc_mapping);
4678 if (!tp->tx_ring)
4679 goto err_out;
4680
4681 tp->hw_status = pci_alloc_consistent(tp->pdev,
4682 TG3_HW_STATUS_SIZE,
4683 &tp->status_mapping);
4684 if (!tp->hw_status)
4685 goto err_out;
4686
4687 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4688 sizeof(struct tg3_hw_stats),
4689 &tp->stats_mapping);
4690 if (!tp->hw_stats)
4691 goto err_out;
4692
4693 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4694 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4695
4696 return 0;
4697
4698err_out:
4699 tg3_free_consistent(tp);
4700 return -ENOMEM;
4701}
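/* Rough map of the single kzalloc() made above (the layout follows
 * directly from the pointer arithmetic in tg3_alloc_consistent()):
 *
 *	rx_std_buffers  : TG3_RX_RING_SIZE       x struct ring_info
 *	rx_jumbo_buffers: TG3_RX_JUMBO_RING_SIZE x struct ring_info
 *	tx_buffers      : TG3_TX_RING_SIZE       x struct tx_ring_info
 *
 * tg3_free_consistent() frees the whole block through rx_std_buffers,
 * which is why only that pointer is passed to kfree().
 */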
4702
4703#define MAX_WAIT_CNT 1000
4704
4705/* To stop a block, clear the enable bit and poll till it
4706 * clears. tp->lock is held.
4707 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004708static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709{
4710 unsigned int i;
4711 u32 val;
4712
4713 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4714 switch (ofs) {
4715 case RCVLSC_MODE:
4716 case DMAC_MODE:
4717 case MBFREE_MODE:
4718 case BUFMGR_MODE:
4719 case MEMARB_MODE:
4720 /* We can't enable/disable these bits of the
4721 * 5705/5750, just say success.
4722 */
4723 return 0;
4724
4725 default:
4726 break;
 4727		}
4728 }
4729
4730 val = tr32(ofs);
4731 val &= ~enable_bit;
4732 tw32_f(ofs, val);
4733
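	/* Poll for the enable bit to clear; MAX_WAIT_CNT iterations of
	 * udelay(100) give the block roughly 100 ms to quiesce.
	 */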
4734 for (i = 0; i < MAX_WAIT_CNT; i++) {
4735 udelay(100);
4736 val = tr32(ofs);
4737 if ((val & enable_bit) == 0)
4738 break;
4739 }
4740
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004741 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4743 "ofs=%lx enable_bit=%x\n",
4744 ofs, enable_bit);
4745 return -ENODEV;
4746 }
4747
4748 return 0;
4749}
4750
4751/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004752static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753{
4754 int i, err;
4755
4756 tg3_disable_ints(tp);
4757
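	/* Stop the blocks in data-path order: the receive side first, then
	 * the send and DMA engines, and finally host coalescing, the buffer
	 * manager and the memory arbiter, so that nothing should still be
	 * DMAing into host memory by the time the chip is reset.
	 */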
4758 tp->rx_mode &= ~RX_MODE_ENABLE;
4759 tw32_f(MAC_RX_MODE, tp->rx_mode);
4760 udelay(10);
4761
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004762 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4763 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4764 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4765 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4766 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4767 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004769 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4770 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4771 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4772 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4773 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4774 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4775 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004776
4777 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4778 tw32_f(MAC_MODE, tp->mac_mode);
4779 udelay(40);
4780
4781 tp->tx_mode &= ~TX_MODE_ENABLE;
4782 tw32_f(MAC_TX_MODE, tp->tx_mode);
4783
4784 for (i = 0; i < MAX_WAIT_CNT; i++) {
4785 udelay(100);
4786 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4787 break;
4788 }
4789 if (i >= MAX_WAIT_CNT) {
4790 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4791 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4792 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07004793 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004794 }
4795
Michael Chane6de8ad2005-05-05 14:42:41 -07004796 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004797 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4798 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004799
4800 tw32(FTQ_RESET, 0xffffffff);
4801 tw32(FTQ_RESET, 0x00000000);
4802
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004803 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4804 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805
4806 if (tp->hw_status)
4807 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4808 if (tp->hw_stats)
4809 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4810
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811 return err;
4812}
4813
4814/* tp->lock is held. */
4815static int tg3_nvram_lock(struct tg3 *tp)
4816{
4817 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4818 int i;
4819
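		/* The lock nests: only the outermost caller requests the
		 * hardware SWARB semaphore.  Nested callers just bump
		 * nvram_lock_cnt, and tg3_nvram_unlock() releases the
		 * semaphore once the count drops back to zero.
		 */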
Michael Chanec41c7d2006-01-17 02:40:55 -08004820 if (tp->nvram_lock_cnt == 0) {
4821 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4822 for (i = 0; i < 8000; i++) {
4823 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4824 break;
4825 udelay(20);
4826 }
4827 if (i == 8000) {
4828 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4829 return -ENODEV;
4830 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 }
Michael Chanec41c7d2006-01-17 02:40:55 -08004832 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833 }
4834 return 0;
4835}
4836
4837/* tp->lock is held. */
4838static void tg3_nvram_unlock(struct tg3 *tp)
4839{
Michael Chanec41c7d2006-01-17 02:40:55 -08004840 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4841 if (tp->nvram_lock_cnt > 0)
4842 tp->nvram_lock_cnt--;
4843 if (tp->nvram_lock_cnt == 0)
4844 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4845 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004846}
4847
4848/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07004849static void tg3_enable_nvram_access(struct tg3 *tp)
4850{
4851 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4852 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4853 u32 nvaccess = tr32(NVRAM_ACCESS);
4854
4855 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4856 }
4857}
4858
4859/* tp->lock is held. */
4860static void tg3_disable_nvram_access(struct tg3 *tp)
4861{
4862 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4863 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4864 u32 nvaccess = tr32(NVRAM_ACCESS);
4865
4866 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4867 }
4868}
4869
Matt Carlson0d3031d2007-10-10 18:02:43 -07004870static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4871{
4872 int i;
4873 u32 apedata;
4874
4875 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4876 if (apedata != APE_SEG_SIG_MAGIC)
4877 return;
4878
4879 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4880 if (apedata != APE_FW_STATUS_READY)
4881 return;
4882
4883 /* Wait for up to 1 millisecond for APE to service previous event. */
4884 for (i = 0; i < 10; i++) {
4885 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4886 return;
4887
4888 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4889
4890 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4891 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4892 event | APE_EVENT_STATUS_EVENT_PENDING);
4893
4894 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4895
4896 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4897 break;
4898
4899 udelay(100);
4900 }
4901
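	/* Ring the APE doorbell only if the new event was actually queued
	 * above, i.e. the previous event drained within the 1 ms window.
	 */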
4902 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4903 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4904}
4905
4906static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4907{
4908 u32 event;
4909 u32 apedata;
4910
4911 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4912 return;
4913
4914 switch (kind) {
4915 case RESET_KIND_INIT:
4916 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4917 APE_HOST_SEG_SIG_MAGIC);
4918 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4919 APE_HOST_SEG_LEN_MAGIC);
4920 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4921 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4922 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4923 APE_HOST_DRIVER_ID_MAGIC);
4924 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4925 APE_HOST_BEHAV_NO_PHYLOCK);
4926
4927 event = APE_EVENT_STATUS_STATE_START;
4928 break;
4929 case RESET_KIND_SHUTDOWN:
4930 event = APE_EVENT_STATUS_STATE_UNLOAD;
4931 break;
4932 case RESET_KIND_SUSPEND:
4933 event = APE_EVENT_STATUS_STATE_SUSPEND;
4934 break;
4935 default:
4936 return;
4937 }
4938
4939 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4940
4941 tg3_ape_send_event(tp, event);
4942}
4943
Michael Chane6af3012005-04-21 17:12:05 -07004944/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4946{
David S. Millerf49639e2006-06-09 11:58:36 -07004947 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4948 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004949
4950 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4951 switch (kind) {
4952 case RESET_KIND_INIT:
4953 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4954 DRV_STATE_START);
4955 break;
4956
4957 case RESET_KIND_SHUTDOWN:
4958 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4959 DRV_STATE_UNLOAD);
4960 break;
4961
4962 case RESET_KIND_SUSPEND:
4963 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4964 DRV_STATE_SUSPEND);
4965 break;
4966
4967 default:
4968 break;
 4969		}
4970 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07004971
4972 if (kind == RESET_KIND_INIT ||
4973 kind == RESET_KIND_SUSPEND)
4974 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975}
4976
4977/* tp->lock is held. */
4978static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4979{
4980 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4981 switch (kind) {
4982 case RESET_KIND_INIT:
4983 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4984 DRV_STATE_START_DONE);
4985 break;
4986
4987 case RESET_KIND_SHUTDOWN:
4988 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4989 DRV_STATE_UNLOAD_DONE);
4990 break;
4991
4992 default:
4993 break;
 4994		}
4995 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07004996
4997 if (kind == RESET_KIND_SHUTDOWN)
4998 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999}
5000
5001/* tp->lock is held. */
5002static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5003{
5004 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5005 switch (kind) {
5006 case RESET_KIND_INIT:
5007 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5008 DRV_STATE_START);
5009 break;
5010
5011 case RESET_KIND_SHUTDOWN:
5012 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5013 DRV_STATE_UNLOAD);
5014 break;
5015
5016 case RESET_KIND_SUSPEND:
5017 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5018 DRV_STATE_SUSPEND);
5019 break;
5020
5021 default:
5022 break;
 5023		}
5024 }
5025}
5026
Michael Chan7a6f4362006-09-27 16:03:31 -07005027static int tg3_poll_fw(struct tg3 *tp)
5028{
5029 int i;
5030 u32 val;
5031
Michael Chanb5d37722006-09-27 16:06:21 -07005032 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005033 /* Wait up to 20ms for init done. */
5034 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005035 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5036 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005037 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005038 }
5039 return -ENODEV;
5040 }
5041
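	/* tg3_write_sig_pre_reset() left NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in
	 * the firmware mailbox; bootcode acknowledges by writing back the
	 * one's complement, which is the value polled for below.
	 */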
Michael Chan7a6f4362006-09-27 16:03:31 -07005042 /* Wait for firmware initialization to complete. */
5043 for (i = 0; i < 100000; i++) {
5044 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5045 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5046 break;
5047 udelay(10);
5048 }
5049
5050 /* Chip might not be fitted with firmware. Some Sun onboard
5051 * parts are configured like that. So don't signal the timeout
5052 * of the above loop as an error, but do report the lack of
5053 * running firmware once.
5054 */
5055 if (i >= 100000 &&
5056 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5057 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5058
5059 printk(KERN_INFO PFX "%s: No firmware running.\n",
5060 tp->dev->name);
5061 }
5062
5063 return 0;
5064}
5065
Michael Chanee6a99b2007-07-18 21:49:10 -07005066/* Save PCI command register before chip reset */
5067static void tg3_save_pci_state(struct tg3 *tp)
5068{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005069 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005070}
5071
5072/* Restore PCI state after chip reset */
5073static void tg3_restore_pci_state(struct tg3 *tp)
5074{
5075 u32 val;
5076
5077 /* Re-enable indirect register accesses. */
5078 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5079 tp->misc_host_ctrl);
5080
5081 /* Set MAX PCI retry to zero. */
5082 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5083 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5084 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5085 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005086 /* Allow reads and writes to the APE register and memory space. */
5087 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5088 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5089 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005090 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5091
Matt Carlson8a6eac92007-10-21 16:17:55 -07005092 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005093
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005094 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5095 pcie_set_readrq(tp->pdev, 4096);
5096 else {
Michael Chan114342f2007-10-15 02:12:26 -07005097 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5098 tp->pci_cacheline_sz);
5099 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5100 tp->pci_lat_timer);
5101 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005102
Michael Chanee6a99b2007-07-18 21:49:10 -07005103 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005104 if (tp->pcix_cap) {
5105 u16 pcix_cmd;
5106
5107 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5108 &pcix_cmd);
5109 pcix_cmd &= ~PCI_X_CMD_ERO;
5110 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5111 pcix_cmd);
5112 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005113
5114 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005115
5116 /* Chip reset on 5780 will reset MSI enable bit,
5117 * so need to restore it.
5118 */
5119 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5120 u16 ctrl;
5121
5122 pci_read_config_word(tp->pdev,
5123 tp->msi_cap + PCI_MSI_FLAGS,
5124 &ctrl);
5125 pci_write_config_word(tp->pdev,
5126 tp->msi_cap + PCI_MSI_FLAGS,
5127 ctrl | PCI_MSI_FLAGS_ENABLE);
5128 val = tr32(MSGINT_MODE);
5129 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5130 }
5131 }
5132}
5133
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134static void tg3_stop_fw(struct tg3 *);
5135
5136/* tp->lock is held. */
5137static int tg3_chip_reset(struct tg3 *tp)
5138{
5139 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005140 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005141 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142
David S. Millerf49639e2006-06-09 11:58:36 -07005143 tg3_nvram_lock(tp);
5144
5145 /* No matching tg3_nvram_unlock() after this because
5146 * chip reset below will undo the nvram lock.
5147 */
5148 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149
Michael Chanee6a99b2007-07-18 21:49:10 -07005150 /* GRC_MISC_CFG core clock reset will clear the memory
5151 * enable bit in PCI register 4 and the MSI enable bit
5152 * on some chips, so we save relevant registers here.
5153 */
5154 tg3_save_pci_state(tp);
5155
Michael Chand9ab5ad12006-03-20 22:27:35 -08005156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005158 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chand9ab5ad12006-03-20 22:27:35 -08005161 tw32(GRC_FASTBOOT_PC, 0);
5162
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163 /*
5164 * We must avoid the readl() that normally takes place.
5165 * It locks machines, causes machine checks, and other
5166 * fun things. So, temporarily disable the 5701
 5167	 * hardware workaround while we do the reset.
5168 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005169 write_op = tp->write32;
5170 if (write_op == tg3_write_flush_reg32)
5171 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
Michael Chand18edcb2007-03-24 20:57:11 -07005173 /* Prevent the irq handler from reading or writing PCI registers
5174 * during chip reset when the memory enable bit in the PCI command
5175 * register may be cleared. The chip does not generate interrupt
5176 * at this time, but the irq handler may still be called due to irq
5177 * sharing or irqpoll.
5178 */
5179 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005180 if (tp->hw_status) {
5181 tp->hw_status->status = 0;
5182 tp->hw_status->status_tag = 0;
5183 }
Michael Chand18edcb2007-03-24 20:57:11 -07005184 tp->last_tag = 0;
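	/* Make the CHIP_RESETTING flag and the cleared status block visible
	 * to other CPUs, then wait out any interrupt handler that is already
	 * running before the reset register is touched.
	 */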
5185 smp_mb();
5186 synchronize_irq(tp->pdev->irq);
5187
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188 /* do the reset */
5189 val = GRC_MISC_CFG_CORECLK_RESET;
5190
5191 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5192 if (tr32(0x7e2c) == 0x60) {
5193 tw32(0x7e2c, 0x20);
5194 }
5195 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5196 tw32(GRC_MISC_CFG, (1 << 29));
5197 val |= (1 << 29);
5198 }
5199 }
5200
Michael Chanb5d37722006-09-27 16:06:21 -07005201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5202 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5203 tw32(GRC_VCPU_EXT_CTRL,
5204 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5205 }
5206
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5208 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5209 tw32(GRC_MISC_CFG, val);
5210
Michael Chan1ee582d2005-08-09 20:16:46 -07005211 /* restore 5701 hardware bug workaround write method */
5212 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213
5214 /* Unfortunately, we have to delay before the PCI read back.
 5215	 * Some 575X chips will not even respond to a PCI cfg access
5216 * when the reset command is given to the chip.
5217 *
5218 * How do these hardware designers expect things to work
5219 * properly if the PCI write is posted for a long period
5220 * of time? It is always necessary to have some method by
5221 * which a register read back can occur to push the write
5222 * out which does the reset.
5223 *
 5224	 * For most tg3 variants the trick below works.
5225 * Ho hum...
5226 */
5227 udelay(120);
5228
5229 /* Flush PCI posted writes. The normal MMIO registers
5230 * are inaccessible at this time so this is the only
 5231	 * way to do this reliably (actually, this is no longer
5232 * the case, see above). I tried to use indirect
5233 * register read/write but this upset some 5701 variants.
5234 */
5235 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5236
5237 udelay(120);
5238
5239 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5240 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5241 int i;
5242 u32 cfg_val;
5243
5244 /* Wait for link training to complete. */
5245 for (i = 0; i < 5000; i++)
5246 udelay(100);
5247
5248 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5249 pci_write_config_dword(tp->pdev, 0xc4,
5250 cfg_val | (1 << 15));
5251 }
5252 /* Set PCIE max payload size and clear error status. */
5253 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5254 }
5255
Michael Chanee6a99b2007-07-18 21:49:10 -07005256 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257
Michael Chand18edcb2007-03-24 20:57:11 -07005258 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5259
Michael Chanee6a99b2007-07-18 21:49:10 -07005260 val = 0;
5261 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07005262 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07005263 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264
5265 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5266 tg3_stop_fw(tp);
5267 tw32(0x5000, 0x400);
5268 }
5269
5270 tw32(GRC_MODE, tp->grc_mode);
5271
5272 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005273 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274
5275 tw32(0xc4, val | (1 << 15));
5276 }
5277
5278 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5279 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5280 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5281 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5282 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5283 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5284 }
5285
5286 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5287 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5288 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07005289 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5290 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5291 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292 } else
5293 tw32_f(MAC_MODE, 0);
5294 udelay(40);
5295
Michael Chan7a6f4362006-09-27 16:03:31 -07005296 err = tg3_poll_fw(tp);
5297 if (err)
5298 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299
5300 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5301 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005302 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005303
5304 tw32(0x7c00, val | (1 << 25));
5305 }
5306
5307 /* Reprobe ASF enable state. */
5308 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5309 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5310 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5311 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5312 u32 nic_cfg;
5313
5314 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5315 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5316 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07005317 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5319 }
5320 }
5321
5322 return 0;
5323}
5324
5325/* tp->lock is held. */
5326static void tg3_stop_fw(struct tg3 *tp)
5327{
Matt Carlson0d3031d2007-10-10 18:02:43 -07005328 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5329 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 u32 val;
5331 int i;
5332
5333 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5334 val = tr32(GRC_RX_CPU_EVENT);
5335 val |= (1 << 14);
5336 tw32(GRC_RX_CPU_EVENT, val);
5337
5338 /* Wait for RX cpu to ACK the event. */
5339 for (i = 0; i < 100; i++) {
5340 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5341 break;
5342 udelay(1);
5343 }
5344 }
5345}
5346
5347/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07005348static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349{
5350 int err;
5351
5352 tg3_stop_fw(tp);
5353
Michael Chan944d9802005-05-29 14:57:48 -07005354 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005355
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005356 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005357 err = tg3_chip_reset(tp);
5358
Michael Chan944d9802005-05-29 14:57:48 -07005359 tg3_write_sig_legacy(tp, kind);
5360 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361
5362 if (err)
5363 return err;
5364
5365 return 0;
5366}
5367
5368#define TG3_FW_RELEASE_MAJOR 0x0
5369#define TG3_FW_RELASE_MINOR 0x0
5370#define TG3_FW_RELEASE_FIX 0x0
5371#define TG3_FW_START_ADDR 0x08000000
5372#define TG3_FW_TEXT_ADDR 0x08000000
5373#define TG3_FW_TEXT_LEN 0x9c0
5374#define TG3_FW_RODATA_ADDR 0x080009c0
5375#define TG3_FW_RODATA_LEN 0x60
5376#define TG3_FW_DATA_ADDR 0x08000a40
5377#define TG3_FW_DATA_LEN 0x20
5378#define TG3_FW_SBSS_ADDR 0x08000a60
5379#define TG3_FW_SBSS_LEN 0xc
5380#define TG3_FW_BSS_ADDR 0x08000a70
5381#define TG3_FW_BSS_LEN 0x10
5382
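/* On-chip CPU firmware image for the 5701 A0 workaround; the *_ADDR and
 * *_LEN macros above describe how tg3_load_5701_a0_firmware_fix() lays
 * each section out in the CPU scratch memory.
 */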
Andreas Mohr50da8592006-08-14 23:54:30 -07005383static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5385 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5386 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5387 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5388 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5389 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5390 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5391 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5392 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5393 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5394 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5395 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5396 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5397 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5398 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5399 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5400 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5401 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5402 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5403 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5404 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5405 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5406 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5407 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5408 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5409 0, 0, 0, 0, 0, 0,
5410 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5411 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5412 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5413 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5414 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5415 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5416 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5417 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5418 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5419 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5420 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5421 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5422 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5423 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5424 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5425 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5426 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5427 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5428 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5429 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5430 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5431 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5432 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5433 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5434 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5435 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5436 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5437 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5438 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5439 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5440 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5441 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5442 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5443 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5444 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5445 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5446 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5447 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5448 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5449 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5450 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5451 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5452 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5453 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5454 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5455 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5456 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5457 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5458 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5459 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5460 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5461 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5462 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5463 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5464 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5465 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5466 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5467 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5468 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5469 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5470 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5471 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5472 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5473 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5474 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5475};
5476
Andreas Mohr50da8592006-08-14 23:54:30 -07005477static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005478 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5479 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5480 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5481 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5482 0x00000000
5483};
5484
5485#if 0 /* All zeros, don't eat up space with it. */
5486u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5487 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5488 0x00000000, 0x00000000, 0x00000000, 0x00000000
5489};
5490#endif
5491
5492#define RX_CPU_SCRATCH_BASE 0x30000
5493#define RX_CPU_SCRATCH_SIZE 0x04000
5494#define TX_CPU_SCRATCH_BASE 0x34000
5495#define TX_CPU_SCRATCH_SIZE 0x04000
5496
5497/* tp->lock is held. */
5498static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5499{
5500 int i;
5501
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02005502 BUG_ON(offset == TX_CPU_BASE &&
5503 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005504
Michael Chanb5d37722006-09-27 16:06:21 -07005505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5506 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5507
5508 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5509 return 0;
5510 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511 if (offset == RX_CPU_BASE) {
5512 for (i = 0; i < 10000; i++) {
5513 tw32(offset + CPU_STATE, 0xffffffff);
5514 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5515 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5516 break;
5517 }
5518
5519 tw32(offset + CPU_STATE, 0xffffffff);
5520 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5521 udelay(10);
5522 } else {
5523 for (i = 0; i < 10000; i++) {
5524 tw32(offset + CPU_STATE, 0xffffffff);
5525 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5526 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5527 break;
5528 }
5529 }
5530
5531 if (i >= 10000) {
 5532		printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
 5533		       "%s CPU\n",
5534 tp->dev->name,
5535 (offset == RX_CPU_BASE ? "RX" : "TX"));
5536 return -ENODEV;
5537 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005538
5539 /* Clear firmware's nvram arbitration. */
5540 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5541 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005542 return 0;
5543}
5544
5545struct fw_info {
5546 unsigned int text_base;
5547 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07005548 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 unsigned int rodata_base;
5550 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07005551 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552 unsigned int data_base;
5553 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07005554 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555};
5556
5557/* tp->lock is held. */
5558static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5559 int cpu_scratch_size, struct fw_info *info)
5560{
Michael Chanec41c7d2006-01-17 02:40:55 -08005561 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005562 void (*write_op)(struct tg3 *, u32, u32);
5563
5564 if (cpu_base == TX_CPU_BASE &&
5565 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5566 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
 5567		       "TX cpu firmware on %s which is 5705 or later.\n",
5568 tp->dev->name);
5569 return -EINVAL;
5570 }
5571
5572 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5573 write_op = tg3_write_mem;
5574 else
5575 write_op = tg3_write_indirect_reg32;
5576
Michael Chan1b628152005-05-29 14:59:49 -07005577 /* It is possible that bootcode is still loading at this point.
 5578	 * Get the nvram lock before halting the cpu.
5579 */
Michael Chanec41c7d2006-01-17 02:40:55 -08005580 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08005582 if (!lock_err)
5583 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005584 if (err)
5585 goto out;
5586
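	/* Zero the whole scratch area, then copy each firmware section into
	 * place.  A NULL section pointer (e.g. the all-zero tg3FwData image
	 * that is compiled out above) simply leaves that range zeroed.
	 */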
5587 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5588 write_op(tp, cpu_scratch_base + i, 0);
5589 tw32(cpu_base + CPU_STATE, 0xffffffff);
5590 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5591 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5592 write_op(tp, (cpu_scratch_base +
5593 (info->text_base & 0xffff) +
5594 (i * sizeof(u32))),
5595 (info->text_data ?
5596 info->text_data[i] : 0));
5597 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5598 write_op(tp, (cpu_scratch_base +
5599 (info->rodata_base & 0xffff) +
5600 (i * sizeof(u32))),
5601 (info->rodata_data ?
5602 info->rodata_data[i] : 0));
5603 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5604 write_op(tp, (cpu_scratch_base +
5605 (info->data_base & 0xffff) +
5606 (i * sizeof(u32))),
5607 (info->data_data ?
5608 info->data_data[i] : 0));
5609
5610 err = 0;
5611
5612out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005613 return err;
5614}
5615
5616/* tp->lock is held. */
5617static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5618{
5619 struct fw_info info;
5620 int err, i;
5621
5622 info.text_base = TG3_FW_TEXT_ADDR;
5623 info.text_len = TG3_FW_TEXT_LEN;
5624 info.text_data = &tg3FwText[0];
5625 info.rodata_base = TG3_FW_RODATA_ADDR;
5626 info.rodata_len = TG3_FW_RODATA_LEN;
5627 info.rodata_data = &tg3FwRodata[0];
5628 info.data_base = TG3_FW_DATA_ADDR;
5629 info.data_len = TG3_FW_DATA_LEN;
5630 info.data_data = NULL;
5631
5632 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5633 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5634 &info);
5635 if (err)
5636 return err;
5637
5638 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5639 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5640 &info);
5641 if (err)
5642 return err;
5643
5644 /* Now startup only the RX cpu. */
5645 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5646 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5647
5648 for (i = 0; i < 5; i++) {
5649 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5650 break;
5651 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5652 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5653 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5654 udelay(1000);
5655 }
5656 if (i >= 5) {
 5657		printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
 5658		       "to set RX CPU PC: is %08x, should be %08x\n",
5659 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5660 TG3_FW_TEXT_ADDR);
5661 return -ENODEV;
5662 }
5663 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5664 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5665
5666 return 0;
5667}
5668
Linus Torvalds1da177e2005-04-16 15:20:36 -07005669
5670#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5671#define TG3_TSO_FW_RELASE_MINOR 0x6
5672#define TG3_TSO_FW_RELEASE_FIX 0x0
5673#define TG3_TSO_FW_START_ADDR 0x08000000
5674#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5675#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5676#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5677#define TG3_TSO_FW_RODATA_LEN 0x60
5678#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5679#define TG3_TSO_FW_DATA_LEN 0x30
5680#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5681#define TG3_TSO_FW_SBSS_LEN 0x2c
5682#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5683#define TG3_TSO_FW_BSS_LEN 0x894
5684
Andreas Mohr50da8592006-08-14 23:54:30 -07005685static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5687 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5688 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5689 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5690 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5691 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5692 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5693 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5694 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5695 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5696 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5697 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5698 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5699 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5700 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5701 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5702 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5703 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5704 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5705 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5706 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5707 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5708 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5709 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5710 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5711 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5712 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5713 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5714 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5715 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5716 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5717 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5718 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5719 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5720 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5721 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5722 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5723 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5724 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5725 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5726 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5727 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5728 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5729 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5730 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5731 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5732 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5733 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5734 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5735 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5736 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5737 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5738 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5739 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5740 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5741 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5742 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5743 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5744 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5745 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5746 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5747 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5748 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5749 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5750 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5751 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5752 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5753 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5754 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5755 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5756 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5757 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5758 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5759 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5760 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5761 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5762 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5763 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5764 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5765 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5766 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5767 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5768 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5769 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5770 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5771 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5772 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5773 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5774 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5775 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5776 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5777 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5778 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5779 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5780 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5781 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5782 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5783 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5784 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5785 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5786 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5787 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5788 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5789 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5790 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5791 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5792 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5793 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5794 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5795 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5796 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5797 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5798 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5799 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5800 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5801 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5802 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5803 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5804 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5805 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5806 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5807 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5808 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5809 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5810 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5811 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5812 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5813 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5814 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5815 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5816 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5817 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5818 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5819 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5820 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5821 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5822 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5823 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5824 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5825 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5826 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5827 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5828 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5829 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5830 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5831 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5832 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5833 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5834 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5835 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5836 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5837 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5838 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5839 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5840 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5841 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5842 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5843 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5844 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5845 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5846 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5847 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5848 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5849 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5850 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5851 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5852 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5853 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5854 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5855 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5856 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5857 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5858 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5859 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5860 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5861 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5862 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5863 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5864 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5865 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5866 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5867 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5868 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5869 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5870 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5871 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5872 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5873 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5874 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5875 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5876 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5877 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5878 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5879 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5880 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5881 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5882 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5883 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5884 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5885 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5886 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5887 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5888 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5889 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5890 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5891 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5892 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5893 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5894 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5895 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5896 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5897 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5898 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5899 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5900 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5901 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5902 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5903 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5904 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5905 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5906 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5907 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5908 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5909 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5910 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5911 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5912 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5913 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5914 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5915 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5916 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5917 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5918 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5919 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5920 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5921 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5922 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5923 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5924 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5925 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5926 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5927 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5928 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5929 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5930 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5931 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5932 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5933 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5934 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5935 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5936 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5937 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5938 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5939 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5940 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5941 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5942 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5943 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5944 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5945 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5946 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5947 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5948 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5949 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5950 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5951 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5952 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5953 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5954 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5955 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5956 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5957 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5958 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5959 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5960 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5961 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5962 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5963 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5964 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5965 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5966 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5967 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5968 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5969 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5970};
5971
Andreas Mohr50da8592006-08-14 23:54:30 -07005972static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5974 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5975 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5976 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5977 0x00000000,
5978};
5979
Andreas Mohr50da8592006-08-14 23:54:30 -07005980static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005981 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5982 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5983 0x00000000,
5984};
5985
5986/* 5705 needs a special version of the TSO firmware. */
5987#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5988#define TG3_TSO5_FW_RELEASE_MINOR 0x2
5989#define TG3_TSO5_FW_RELEASE_FIX 0x0
5990#define TG3_TSO5_FW_START_ADDR 0x00010000
5991#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5992#define TG3_TSO5_FW_TEXT_LEN 0xe90
5993#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5994#define TG3_TSO5_FW_RODATA_LEN 0x50
5995#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5996#define TG3_TSO5_FW_DATA_LEN 0x20
5997#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5998#define TG3_TSO5_FW_SBSS_LEN 0x28
5999#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6000#define TG3_TSO5_FW_BSS_LEN 0x88
6001
Andreas Mohr50da8592006-08-14 23:54:30 -07006002static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6004 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6005 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6006 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6007 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6008 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6009 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6010 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6011 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6012 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6013 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6014 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6015 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6016 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6017 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6018 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6019 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6020 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6021 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6022 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6023 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6024 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6025 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6026 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6027 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6028 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6029 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6030 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6031 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6032 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6033 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6034 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6035 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6036 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6037 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6038 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6039 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6040 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6041 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6042 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6043 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6044 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6045 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6046 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6047 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6048 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6049 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6050 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6051 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6052 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6053 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6054 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6055 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6056 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6057 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6058 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6059 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6060 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6061 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6062 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6063 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6064 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6065 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6066 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6067 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6068 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6069 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6070 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6071 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6072 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6073 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6074 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6075 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6076 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6077 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6078 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6079 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6080 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6081 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6082 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6083 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6084 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6085 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6086 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6087 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6088 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6089 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6090 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6091 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6092 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6093 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6094 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6095 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6096 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6097 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6098 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6099 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6100 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6101 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6102 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6103 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6104 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6105 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6106 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6107 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6108 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6109 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6110 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6111 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6112 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6113 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6114 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6115 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6116 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6117 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6118 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6119 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6120 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6121 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6122 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6123 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6124 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6125 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6126 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6127 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6128 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6129 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6130 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6131 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6132 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6133 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6134 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6135 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6136 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6137 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6138 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6139 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6140 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6141 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6142 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6143 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6144 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6145 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6146 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6147 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6148 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6149 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6150 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6151 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6152 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6153 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6154 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6155 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6156 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6157 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6158 0x00000000, 0x00000000, 0x00000000,
6159};
6160
Andreas Mohr50da8592006-08-14 23:54:30 -07006161static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6163 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6164 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6165 0x00000000, 0x00000000, 0x00000000,
6166};
6167
Andreas Mohr50da8592006-08-14 23:54:30 -07006168static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006169 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6170 0x00000000, 0x00000000, 0x00000000,
6171};
6172
6173/* tp->lock is held. */
6174static int tg3_load_tso_firmware(struct tg3 *tp)
6175{
6176 struct fw_info info;
6177 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6178 int err, i;
6179
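 /* Chips that do TSO in hardware need no firmware download. */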
6180 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6181 return 0;
6182
6183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6184 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6185 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6186 info.text_data = &tg3Tso5FwText[0];
6187 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6188 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6189 info.rodata_data = &tg3Tso5FwRodata[0];
6190 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6191 info.data_len = TG3_TSO5_FW_DATA_LEN;
6192 info.data_data = &tg3Tso5FwData[0];
6193 cpu_base = RX_CPU_BASE;
6194 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6195 cpu_scratch_size = (info.text_len +
6196 info.rodata_len +
6197 info.data_len +
6198 TG3_TSO5_FW_SBSS_LEN +
6199 TG3_TSO5_FW_BSS_LEN);
6200 } else {
6201 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6202 info.text_len = TG3_TSO_FW_TEXT_LEN;
6203 info.text_data = &tg3TsoFwText[0];
6204 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6205 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6206 info.rodata_data = &tg3TsoFwRodata[0];
6207 info.data_base = TG3_TSO_FW_DATA_ADDR;
6208 info.data_len = TG3_TSO_FW_DATA_LEN;
6209 info.data_data = &tg3TsoFwData[0];
6210 cpu_base = TX_CPU_BASE;
6211 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6212 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6213 }
6214
6215 err = tg3_load_firmware_cpu(tp, cpu_base,
6216 cpu_scratch_base, cpu_scratch_size,
6217 &info);
6218 if (err)
6219 return err;
6220
6221 /* Now start up the CPU. */
6222 tw32(cpu_base + CPU_STATE, 0xffffffff);
6223 tw32_f(cpu_base + CPU_PC, info.text_base);
6224
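 /* Poll up to five times for the CPU program counter to latch the
  * firmware entry point, halting the CPU and rewriting the PC between
  * attempts. */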
6225 for (i = 0; i < 5; i++) {
6226 if (tr32(cpu_base + CPU_PC) == info.text_base)
6227 break;
6228 tw32(cpu_base + CPU_STATE, 0xffffffff);
6229 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6230 tw32_f(cpu_base + CPU_PC, info.text_base);
6231 udelay(1000);
6232 }
6233 if (i >= 5) {
6234 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
6235 "for %s: is %08x, should be %08x\n",
6236 tp->dev->name, tr32(cpu_base + CPU_PC),
6237 info.text_base);
6238 return -ENODEV;
6239 }
6240 tw32(cpu_base + CPU_STATE, 0xffffffff);
6241 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6242 return 0;
6243}
6244
Linus Torvalds1da177e2005-04-16 15:20:36 -07006245
6246/* tp->lock is held. */
Michael Chan986e0ae2007-05-05 12:10:20 -07006247static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006248{
6249 u32 addr_high, addr_low;
6250 int i;
6251
6252 addr_high = ((tp->dev->dev_addr[0] << 8) |
6253 tp->dev->dev_addr[1]);
6254 addr_low = ((tp->dev->dev_addr[2] << 24) |
6255 (tp->dev->dev_addr[3] << 16) |
6256 (tp->dev->dev_addr[4] << 8) |
6257 (tp->dev->dev_addr[5] << 0));
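 /* The MAC keeps four copies of the station address; program them all,
  * except copy 1 when management firmware (ASF) is using it. */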
6258 for (i = 0; i < 4; i++) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006259 if (i == 1 && skip_mac_1)
6260 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006261 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6262 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6263 }
6264
6265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6267 for (i = 0; i < 12; i++) {
6268 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6269 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6270 }
6271 }
6272
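 /* Derive the transmit backoff seed from the byte sum of the MAC address. */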
6273 addr_high = (tp->dev->dev_addr[0] +
6274 tp->dev->dev_addr[1] +
6275 tp->dev->dev_addr[2] +
6276 tp->dev->dev_addr[3] +
6277 tp->dev->dev_addr[4] +
6278 tp->dev->dev_addr[5]) &
6279 TX_BACKOFF_SEED_MASK;
6280 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6281}
6282
6283static int tg3_set_mac_addr(struct net_device *dev, void *p)
6284{
6285 struct tg3 *tp = netdev_priv(dev);
6286 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07006287 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006288
Michael Chanf9804dd2005-09-27 12:13:10 -07006289 if (!is_valid_ether_addr(addr->sa_data))
6290 return -EINVAL;
6291
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6293
Michael Chane75f7c92006-03-20 21:33:26 -08006294 if (!netif_running(dev))
6295 return 0;
6296
Michael Chan58712ef2006-04-29 18:58:01 -07006297 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006298 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07006299
Michael Chan986e0ae2007-05-05 12:10:20 -07006300 addr0_high = tr32(MAC_ADDR_0_HIGH);
6301 addr0_low = tr32(MAC_ADDR_0_LOW);
6302 addr1_high = tr32(MAC_ADDR_1_HIGH);
6303 addr1_low = tr32(MAC_ADDR_1_LOW);
6304
6305 /* Skip MAC addr 1 if ASF is using it. */
6306 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6307 !(addr1_high == 0 && addr1_low == 0))
6308 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07006309 }
Michael Chan986e0ae2007-05-05 12:10:20 -07006310 spin_lock_bh(&tp->lock);
6311 __tg3_set_mac_addr(tp, skip_mac_1);
6312 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006313
Michael Chanb9ec6c12006-07-25 16:37:27 -07006314 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006315}
6316
6317/* tp->lock is held. */
6318static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6319 dma_addr_t mapping, u32 maxlen_flags,
6320 u32 nic_addr)
6321{
6322 tg3_write_mem(tp,
6323 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6324 ((u64) mapping >> 32));
6325 tg3_write_mem(tp,
6326 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6327 ((u64) mapping & 0xffffffff));
6328 tg3_write_mem(tp,
6329 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6330 maxlen_flags);
6331
6332 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6333 tg3_write_mem(tp,
6334 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6335 nic_addr);
6336}
6337
6338static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07006339static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07006340{
6341 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6342 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6343 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6344 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
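 /* The per-interrupt tick thresholds and the statistics block timer
  * below are only programmed on pre-5705 chips. */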
6345 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6346 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6347 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6348 }
6349 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6350 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6351 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6352 u32 val = ec->stats_block_coalesce_usecs;
6353
6354 if (!netif_carrier_ok(tp->dev))
6355 val = 0;
6356
6357 tw32(HOSTCC_STAT_COAL_TICKS, val);
6358 }
6359}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360
6361/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07006362static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363{
6364 u32 val, rdmac_mode;
6365 int i, err, limit;
6366
6367 tg3_disable_ints(tp);
6368
6369 tg3_stop_fw(tp);
6370
6371 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6372
6373 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07006374 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006375 }
6376
Michael Chan36da4d82006-11-03 01:01:03 -08006377 if (reset_phy)
Michael Chand4d2c552006-03-20 17:47:20 -08006378 tg3_phy_reset(tp);
6379
Linus Torvalds1da177e2005-04-16 15:20:36 -07006380 err = tg3_chip_reset(tp);
6381 if (err)
6382 return err;
6383
6384 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6385
Matt Carlsonb5af7122007-11-12 21:22:02 -08006386 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6387 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07006388 val = tr32(TG3_CPMU_CTRL);
6389 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6390 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08006391
6392 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6393 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6394 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6395 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6396
6397 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6398 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6399 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6400 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6401
6402 val = tr32(TG3_CPMU_HST_ACC);
6403 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6404 val |= CPMU_HST_ACC_MACCLK_6_25;
6405 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07006406 }
6407
Linus Torvalds1da177e2005-04-16 15:20:36 -07006408 /* This works around an issue with Athlon chipsets on
6409 * B3 tigon3 silicon. This bit has no effect on any
6410 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07006411 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006412 */
Matt Carlson795d01c2007-10-07 23:28:17 -07006413 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6414 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6415 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6416 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6417 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006418
6419 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6420 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6421 val = tr32(TG3PCI_PCISTATE);
6422 val |= PCISTATE_RETRY_SAME_DMA;
6423 tw32(TG3PCI_PCISTATE, val);
6424 }
6425
Matt Carlson0d3031d2007-10-10 18:02:43 -07006426 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6427 /* Allow reads and writes to the
6428 * APE register and memory space.
6429 */
6430 val = tr32(TG3PCI_PCISTATE);
6431 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6432 PCISTATE_ALLOW_APE_SHMEM_WR;
6433 tw32(TG3PCI_PCISTATE, val);
6434 }
6435
Linus Torvalds1da177e2005-04-16 15:20:36 -07006436 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6437 /* Enable some hw fixes. */
6438 val = tr32(TG3PCI_MSI_DATA);
6439 val |= (1 << 26) | (1 << 28) | (1 << 29);
6440 tw32(TG3PCI_MSI_DATA, val);
6441 }
6442
6443 /* Descriptor ring init may make accesses to the
6444 * NIC SRAM area to set up the TX descriptors, so we
6445 * can only do this after the hardware has been
6446 * successfully reset.
6447 */
Michael Chan32d8c572006-07-25 16:38:29 -07006448 err = tg3_init_rings(tp);
6449 if (err)
6450 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006451
Matt Carlson9936bcf2007-10-10 18:03:07 -07006452 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6453 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07006454 /* This value is determined during the probe time DMA
6455 * engine test, tg3_test_dma.
6456 */
6457 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6458 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006459
6460 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6461 GRC_MODE_4X_NIC_SEND_RINGS |
6462 GRC_MODE_NO_TX_PHDR_CSUM |
6463 GRC_MODE_NO_RX_PHDR_CSUM);
6464 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07006465
6466 /* Pseudo-header checksum is done by hardware logic and not
6467 * the offload processors, so make the chip do the pseudo-
6468 * header checksums on receive. For transmit it is more
6469 * convenient to do the pseudo-header checksum in software
6470 * as Linux does that on transmit for us in all cases.
6471 */
6472 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006473
6474 tw32(GRC_MODE,
6475 tp->grc_mode |
6476 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6477
6478 /* Set up the timer prescaler register. Clock is always 66 MHz. */
6479 val = tr32(GRC_MISC_CFG);
6480 val &= ~0xff;
6481 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6482 tw32(GRC_MISC_CFG, val);
6483
6484 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07006485 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006486 /* Do nothing. */
6487 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6488 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6490 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6491 else
6492 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6493 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6494 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006496 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6497 int fw_len;
6498
6499 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6500 TG3_TSO5_FW_RODATA_LEN +
6501 TG3_TSO5_FW_DATA_LEN +
6502 TG3_TSO5_FW_SBSS_LEN +
6503 TG3_TSO5_FW_BSS_LEN);
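 /* Round the firmware footprint up to a 128-byte boundary before
  * reserving it at the start of the 5705 MBUF pool. */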
6504 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6505 tw32(BUFMGR_MB_POOL_ADDR,
6506 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6507 tw32(BUFMGR_MB_POOL_SIZE,
6508 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6509 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510
Michael Chan0f893dc2005-07-25 12:30:38 -07006511 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006512 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6513 tp->bufmgr_config.mbuf_read_dma_low_water);
6514 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6515 tp->bufmgr_config.mbuf_mac_rx_low_water);
6516 tw32(BUFMGR_MB_HIGH_WATER,
6517 tp->bufmgr_config.mbuf_high_water);
6518 } else {
6519 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6520 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6521 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6522 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6523 tw32(BUFMGR_MB_HIGH_WATER,
6524 tp->bufmgr_config.mbuf_high_water_jumbo);
6525 }
6526 tw32(BUFMGR_DMA_LOW_WATER,
6527 tp->bufmgr_config.dma_low_water);
6528 tw32(BUFMGR_DMA_HIGH_WATER,
6529 tp->bufmgr_config.dma_high_water);
6530
6531 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
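 /* Wait up to 20 ms (2000 polls at 10 us) for the buffer manager to
  * report itself enabled. */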
6532 for (i = 0; i < 2000; i++) {
6533 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6534 break;
6535 udelay(10);
6536 }
6537 if (i >= 2000) {
6538 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6539 tp->dev->name);
6540 return -ENODEV;
6541 }
6542
6543 /* Setup replenish threshold. */
Michael Chanf92905d2006-06-29 20:14:29 -07006544 val = tp->rx_pending / 8;
6545 if (val == 0)
6546 val = 1;
6547 else if (val > tp->rx_std_max_post)
6548 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07006549 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6550 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6551 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6552
6553 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6554 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6555 }
Michael Chanf92905d2006-06-29 20:14:29 -07006556
6557 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006558
6559 /* Initialize TG3_BDINFO's at:
6560 * RCVDBDI_STD_BD: standard eth size rx ring
6561 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6562 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6563 *
6564 * like so:
6565 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6566 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6567 * ring attribute flags
6568 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6569 *
6570 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6571 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6572 *
6573 * The size of each ring is fixed in the firmware, but the location is
6574 * configurable.
6575 */
6576 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6577 ((u64) tp->rx_std_mapping >> 32));
6578 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6579 ((u64) tp->rx_std_mapping & 0xffffffff));
6580 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6581 NIC_SRAM_RX_BUFFER_DESC);
6582
6583 /* Don't even try to program the JUMBO/MINI buffer descriptor
6584 * configs on 5705.
6585 */
6586 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6587 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6588 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6589 } else {
6590 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6591 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6592
6593 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6594 BDINFO_FLAGS_DISABLED);
6595
6596 /* Setup replenish threshold. */
6597 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6598
Michael Chan0f893dc2005-07-25 12:30:38 -07006599 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006600 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6601 ((u64) tp->rx_jumbo_mapping >> 32));
6602 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6603 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6604 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6605 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6606 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6607 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6608 } else {
6609 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6610 BDINFO_FLAGS_DISABLED);
6611 }
6612
6613 }
6614
6615 /* There is only one send ring on 5705/5750, no need to explicitly
6616 * disable the others.
6617 */
6618 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6619 /* Clear out send RCB ring in SRAM. */
6620 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6621 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6622 BDINFO_FLAGS_DISABLED);
6623 }
6624
6625 tp->tx_prod = 0;
6626 tp->tx_cons = 0;
6627 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6628 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6629
6630 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6631 tp->tx_desc_mapping,
6632 (TG3_TX_RING_SIZE <<
6633 BDINFO_FLAGS_MAXLEN_SHIFT),
6634 NIC_SRAM_TX_BUFFER_DESC);
6635
6636 /* There is only one receive return ring on 5705/5750, no need
6637 * to explicitly disable the others.
6638 */
6639 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6640 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6641 i += TG3_BDINFO_SIZE) {
6642 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6643 BDINFO_FLAGS_DISABLED);
6644 }
6645 }
6646
6647 tp->rx_rcb_ptr = 0;
6648 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6649
6650 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6651 tp->rx_rcb_mapping,
6652 (TG3_RX_RCB_RING_SIZE(tp) <<
6653 BDINFO_FLAGS_MAXLEN_SHIFT),
6654 0);
6655
6656 tp->rx_std_ptr = tp->rx_pending;
6657 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6658 tp->rx_std_ptr);
6659
Michael Chan0f893dc2005-07-25 12:30:38 -07006660 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07006661 tp->rx_jumbo_pending : 0;
6662 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6663 tp->rx_jumbo_ptr);
6664
6665 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07006666 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667
6668 /* MTU + ethernet header + FCS + optional VLAN tag */
6669 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6670
6671 /* The slot time is changed by tg3_setup_phy if we
6672 * run at gigabit with half duplex.
6673 */
6674 tw32(MAC_TX_LENGTHS,
6675 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6676 (6 << TX_LENGTHS_IPG_SHIFT) |
6677 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6678
6679 /* Receive rules. */
6680 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6681 tw32(RCVLPC_CONFIG, 0x0181);
6682
6683 /* Calculate RDMAC_MODE setting early; we need it to determine
6684 * the RCVLPC_STATS_ENABLE mask.
6685 */
6686 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6687 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6688 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6689 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6690 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07006691
Matt Carlsond30cdd22007-10-07 23:28:35 -07006692 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6693 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6694 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6695 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6696
Michael Chan85e94ce2005-04-21 17:05:28 -07006697 /* If statement applies to 5705 and 5750 PCI devices only */
6698 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6699 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6700 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006701 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07006702 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006703 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6704 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6705 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6706 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6707 }
6708 }
6709
Michael Chan85e94ce2005-04-21 17:05:28 -07006710 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6711 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6712
Linus Torvalds1da177e2005-04-16 15:20:36 -07006713 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6714 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006715
6716 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07006717 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6718 val = tr32(RCVLPC_STATS_ENABLE);
6719 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6720 tw32(RCVLPC_STATS_ENABLE, val);
6721 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6722 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723 val = tr32(RCVLPC_STATS_ENABLE);
6724 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6725 tw32(RCVLPC_STATS_ENABLE, val);
6726 } else {
6727 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6728 }
6729 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6730 tw32(SNDDATAI_STATSENAB, 0xffffff);
6731 tw32(SNDDATAI_STATSCTRL,
6732 (SNDDATAI_SCTRL_ENABLE |
6733 SNDDATAI_SCTRL_FASTUPD));
6734
6735 /* Setup host coalescing engine. */
6736 tw32(HOSTCC_MODE, 0);
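 /* Wait up to 20 ms for the coalescing engine to report itself
  * disabled before loading new parameters. */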
6737 for (i = 0; i < 2000; i++) {
6738 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6739 break;
6740 udelay(10);
6741 }
6742
Michael Chand244c892005-07-05 14:42:33 -07006743 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006744
6745 /* set status block DMA address */
6746 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6747 ((u64) tp->status_mapping >> 32));
6748 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6749 ((u64) tp->status_mapping & 0xffffffff));
6750
6751 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6752 /* Status/statistics block address. See tg3_timer,
6753 * the tg3_periodic_fetch_stats call there, and
6754 * tg3_get_stats to see how this works for 5705/5750 chips.
6755 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6757 ((u64) tp->stats_mapping >> 32));
6758 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6759 ((u64) tp->stats_mapping & 0xffffffff));
6760 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6761 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6762 }
6763
6764 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6765
6766 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6767 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6768 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6769 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6770
6771 /* Clear statistics/status block in chip, and status block in ram. */
6772 for (i = NIC_SRAM_STATS_BLK;
6773 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6774 i += sizeof(u32)) {
6775 tg3_write_mem(tp, i, 0);
6776 udelay(40);
6777 }
6778 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6779
Michael Chanc94e3942005-09-27 12:12:42 -07006780 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6781 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6782 /* reset to prevent losing 1st rx packet intermittently */
6783 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6784 udelay(10);
6785 }
6786
Linus Torvalds1da177e2005-04-16 15:20:36 -07006787 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6788 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07006789 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6790 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6791 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6792 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006793 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6794 udelay(40);
6795
Michael Chan314fba32005-04-21 17:07:04 -07006796 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08006797 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07006798 * register to preserve the GPIO settings for LOMs. The GPIOs,
6799 * whether used as inputs or outputs, are set by boot code after
6800 * reset.
6801 */
Michael Chan9d26e212006-12-07 00:21:14 -08006802 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07006803 u32 gpio_mask;
6804
Michael Chan9d26e212006-12-07 00:21:14 -08006805 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6806 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6807 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07006808
6809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6810 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6811 GRC_LCLCTRL_GPIO_OUTPUT3;
6812
Michael Chanaf36e6b2006-03-23 01:28:06 -08006813 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6814 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6815
Gary Zambranoaaf84462007-05-05 11:51:45 -07006816 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07006817 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6818
6819 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08006820 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6821 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6822 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07006823 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006824 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6825 udelay(100);
6826
Michael Chan09ee9292005-08-09 20:17:00 -07006827 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07006828 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006829
6830 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6831 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6832 udelay(40);
6833 }
6834
6835 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6836 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6837 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6838 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6839 WDMAC_MODE_LNGREAD_ENAB);
6840
Michael Chan85e94ce2005-04-21 17:05:28 -07006841 /* If statement applies to 5705 and 5750 PCI devices only */
6842 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6843 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006845 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6846 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6847 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6848 /* nothing */
6849 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6850 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6851 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6852 val |= WDMAC_MODE_RX_ACCEL;
6853 }
6854 }
6855
Michael Chand9ab5ad12006-03-20 22:27:35 -08006856 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08006857 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07006858 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07006859 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6860 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
Michael Chand9ab5ad12006-03-20 22:27:35 -08006861 val |= (1 << 29);
6862
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863 tw32_f(WDMAC_MODE, val);
6864 udelay(40);
6865
Matt Carlson9974a352007-10-07 23:27:28 -07006866 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6867 u16 pcix_cmd;
6868
6869 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6870 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006871 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07006872 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6873 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006874 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07006875 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6876 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006877 }
Matt Carlson9974a352007-10-07 23:27:28 -07006878 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6879 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006880 }
6881
6882 tw32_f(RDMAC_MODE, rdmac_mode);
6883 udelay(40);
6884
6885 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6886 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6887 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07006888
6889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6890 tw32(SNDDATAC_MODE,
6891 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6892 else
6893 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6894
Linus Torvalds1da177e2005-04-16 15:20:36 -07006895 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6896 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6897 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6898 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006899 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6900 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006901 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6902 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6903
6904 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6905 err = tg3_load_5701_a0_firmware_fix(tp);
6906 if (err)
6907 return err;
6908 }
6909
Linus Torvalds1da177e2005-04-16 15:20:36 -07006910 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6911 err = tg3_load_tso_firmware(tp);
6912 if (err)
6913 return err;
6914 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006915
6916 tp->tx_mode = TX_MODE_ENABLE;
6917 tw32_f(MAC_TX_MODE, tp->tx_mode);
6918 udelay(100);
6919
6920 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07006921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chanaf36e6b2006-03-23 01:28:06 -08006923 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6924
Linus Torvalds1da177e2005-04-16 15:20:36 -07006925 tw32_f(MAC_RX_MODE, tp->rx_mode);
6926 udelay(10);
6927
6928 if (tp->link_config.phy_is_low_power) {
6929 tp->link_config.phy_is_low_power = 0;
6930 tp->link_config.speed = tp->link_config.orig_speed;
6931 tp->link_config.duplex = tp->link_config.orig_duplex;
6932 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6933 }
6934
6935 tp->mi_mode = MAC_MI_MODE_BASE;
6936 tw32_f(MAC_MI_MODE, tp->mi_mode);
6937 udelay(80);
6938
6939 tw32(MAC_LED_CTRL, tp->led_ctrl);
6940
6941 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07006942 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006943 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6944 udelay(10);
6945 }
6946 tw32_f(MAC_RX_MODE, tp->rx_mode);
6947 udelay(10);
6948
6949 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6950 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6951 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6952 /* Set drive transmission level to 1.2V */
6953 /* only if the signal pre-emphasis bit is not set */
6954 val = tr32(MAC_SERDES_CFG);
6955 val &= 0xfffff000;
6956 val |= 0x880;
6957 tw32(MAC_SERDES_CFG, val);
6958 }
6959 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6960 tw32(MAC_SERDES_CFG, 0x616000);
6961 }
6962
6963 /* Prevent chip from dropping frames when flow control
6964 * is enabled.
6965 */
6966 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6967
6968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6969 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6970 /* Use hardware link auto-negotiation */
6971 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6972 }
6973
Michael Chand4d2c552006-03-20 17:47:20 -08006974 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6975 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6976 u32 tmp;
6977
6978 tmp = tr32(SERDES_RX_CTRL);
6979 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6980 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6981 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6982 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6983 }
6984
Michael Chan36da4d82006-11-03 01:01:03 -08006985 err = tg3_setup_phy(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006986 if (err)
6987 return err;
6988
Michael Chan715116a2006-09-27 16:09:25 -07006989 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6990 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006991 u32 tmp;
6992
6993 /* Clear CRC stats. */
Michael Chan569a5df2007-02-13 12:18:15 -08006994 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6995 tg3_writephy(tp, MII_TG3_TEST1,
6996 tmp | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006997 tg3_readphy(tp, 0x14, &tmp);
6998 }
6999 }
7000
7001 __tg3_set_rx_mode(tp->dev);
7002
7003 /* Initialize receive rules. */
7004 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7005 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7006 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7007 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7008
Michael Chan4cf78e42005-07-25 12:29:19 -07007009 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007010 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007011 limit = 8;
7012 else
7013 limit = 16;
7014 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7015 limit -= 4;
7016 switch (limit) {
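 /* Each case falls through, disabling every receive rule above the
  * limit computed above. */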
7017 case 16:
7018 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7019 case 15:
7020 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7021 case 14:
7022 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7023 case 13:
7024 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7025 case 12:
7026 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7027 case 11:
7028 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7029 case 10:
7030 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7031 case 9:
7032 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7033 case 8:
7034 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7035 case 7:
7036 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7037 case 6:
7038 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7039 case 5:
7040 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7041 case 4:
7042 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7043 case 3:
7044 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7045 case 2:
7046 case 1:
7047
7048 default:
7049 break;
7050 }
7051
Matt Carlson9ce768e2007-10-11 19:49:11 -07007052 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7053 /* Write our heartbeat update interval to APE. */
7054 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7055 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007056
Linus Torvalds1da177e2005-04-16 15:20:36 -07007057 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7058
Linus Torvalds1da177e2005-04-16 15:20:36 -07007059 return 0;
7060}
7061
7062/* Called at device open time to get the chip ready for
7063 * packet processing. Invoked with tp->lock held.
7064 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007065static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007066{
7067 int err;
7068
7069 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -08007070 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007071 if (err)
7072 goto out;
7073
7074 tg3_switch_clocks(tp);
7075
7076 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7077
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007078 err = tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007079
7080out:
7081 return err;
7082}
7083
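/* Fold a 32-bit hardware counter into a 64-bit (high/low) software
 * statistic, carrying into the high word when the low word wraps. */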
7084#define TG3_STAT_ADD32(PSTAT, REG) \
7085do { u32 __val = tr32(REG); \
7086 (PSTAT)->low += __val; \
7087 if ((PSTAT)->low < __val) \
7088 (PSTAT)->high += 1; \
7089} while (0)
7090
7091static void tg3_periodic_fetch_stats(struct tg3 *tp)
7092{
7093 struct tg3_hw_stats *sp = tp->hw_stats;
7094
7095 if (!netif_carrier_ok(tp->dev))
7096 return;
7097
7098 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7099 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7100 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7101 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7102 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7103 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7104 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7105 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7106 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7107 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7108 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7109 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7110 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7111
7112 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7113 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7114 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7115 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7116 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7117 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7118 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7119 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7120 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7121 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7122 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7123 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7124 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7125 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007126
7127 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7128 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7129 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130}
7131
7132static void tg3_timer(unsigned long __opaque)
7133{
7134 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135
Michael Chanf475f162006-03-27 23:20:14 -08007136 if (tp->irq_sync)
7137 goto restart_timer;
7138
David S. Millerf47c11e2005-06-24 20:18:35 -07007139 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007140
David S. Millerfac9b832005-05-18 22:46:34 -07007141 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7142 /* All of this garbage is because when using non-tagged
7143 * IRQ status the mailbox/status_block protocol the chip
7144 * uses with the cpu is race prone.
7145 */
7146 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7147 tw32(GRC_LOCAL_CTRL,
7148 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7149 } else {
7150 tw32(HOSTCC_MODE, tp->coalesce_mode |
7151 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007153
David S. Millerfac9b832005-05-18 22:46:34 -07007154 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7155 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007156 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007157 schedule_work(&tp->reset_task);
7158 return;
7159 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160 }
7161
Linus Torvalds1da177e2005-04-16 15:20:36 -07007162 /* This part only runs once per second. */
7163 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007164 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7165 tg3_periodic_fetch_stats(tp);
7166
Linus Torvalds1da177e2005-04-16 15:20:36 -07007167 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7168 u32 mac_stat;
7169 int phy_event;
7170
7171 mac_stat = tr32(MAC_STATUS);
7172
7173 phy_event = 0;
7174 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7175 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7176 phy_event = 1;
7177 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7178 phy_event = 1;
7179
7180 if (phy_event)
7181 tg3_setup_phy(tp, 0);
7182 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7183 u32 mac_stat = tr32(MAC_STATUS);
7184 int need_setup = 0;
7185
7186 if (netif_carrier_ok(tp->dev) &&
7187 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7188 need_setup = 1;
7189 }
7190 if (! netif_carrier_ok(tp->dev) &&
7191 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7192 MAC_STATUS_SIGNAL_DET))) {
7193 need_setup = 1;
7194 }
7195 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007196 if (!tp->serdes_counter) {
7197 tw32_f(MAC_MODE,
7198 (tp->mac_mode &
7199 ~MAC_MODE_PORT_MODE_MASK));
7200 udelay(40);
7201 tw32_f(MAC_MODE, tp->mac_mode);
7202 udelay(40);
7203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007204 tg3_setup_phy(tp, 0);
7205 }
Michael Chan747e8f82005-07-25 12:33:22 -07007206 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7207 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208
7209 tp->timer_counter = tp->timer_multiplier;
7210 }
7211
Michael Chan130b8e42006-09-27 16:00:40 -07007212 /* Heartbeat is only sent once every 2 seconds.
7213 *
7214 * The heartbeat is to tell the ASF firmware that the host
7215 * driver is still alive. In the event that the OS crashes,
7216 * ASF needs to reset the hardware to free up the FIFO space
7217 * that may be filled with rx packets destined for the host.
7218 * If the FIFO is full, ASF will no longer function properly.
7219 *
7220 * Unintended resets have been reported on real time kernels
7221 * where the timer doesn't run on time. Netpoll will also have
7222 * same problem.
7223 *
7224 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7225 * to check the ring condition when the heartbeat is expiring
7226 * before doing the reset. This will prevent most unintended
7227 * resets.
7228 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007229 if (!--tp->asf_counter) {
7230 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7231 u32 val;
7232
Michael Chanbbadf502006-04-06 21:46:34 -07007233 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007234 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007235 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007236 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007237 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007238 val = tr32(GRC_RX_CPU_EVENT);
7239 val |= (1 << 14);
7240 tw32(GRC_RX_CPU_EVENT, val);
7241 }
7242 tp->asf_counter = tp->asf_multiplier;
7243 }
7244
David S. Millerf47c11e2005-06-24 20:18:35 -07007245 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007246
Michael Chanf475f162006-03-27 23:20:14 -08007247restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007248 tp->timer.expires = jiffies + tp->timer_offset;
7249 add_timer(&tp->timer);
7250}
7251
Adrian Bunk81789ef2006-03-20 23:00:14 -08007252static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007253{
David Howells7d12e782006-10-05 14:55:46 +01007254 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007255 unsigned long flags;
7256 struct net_device *dev = tp->dev;
7257
7258 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7259 fn = tg3_msi;
7260 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7261 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007262 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007263 } else {
7264 fn = tg3_interrupt;
7265 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7266 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007267 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007268 }
7269 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7270}
7271
Michael Chan79381092005-04-21 17:13:59 -07007272static int tg3_test_interrupt(struct tg3 *tp)
7273{
7274 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07007275 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07007276
Michael Chand4bc3922005-05-29 14:59:20 -07007277 if (!netif_running(dev))
7278 return -ENODEV;
7279
Michael Chan79381092005-04-21 17:13:59 -07007280 tg3_disable_ints(tp);
7281
7282 free_irq(tp->pdev->irq, dev);
7283
7284 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007285 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07007286 if (err)
7287 return err;
7288
Michael Chan38f38432005-09-05 17:53:32 -07007289 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07007290 tg3_enable_ints(tp);
7291
7292 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7293 HOSTCC_MODE_NOW);
7294
7295 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07007296 u32 int_mbox, misc_host_ctrl;
7297
Michael Chan09ee9292005-08-09 20:17:00 -07007298 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7299 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07007300 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7301
7302 if ((int_mbox != 0) ||
7303 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7304 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07007305 break;
Michael Chanb16250e2006-09-27 16:10:14 -07007306 }
7307
Michael Chan79381092005-04-21 17:13:59 -07007308 msleep(10);
7309 }
7310
7311 tg3_disable_ints(tp);
7312
7313 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04007314
Michael Chanfcfa0a32006-03-20 22:28:41 -08007315 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007316
7317 if (err)
7318 return err;
7319
Michael Chanb16250e2006-09-27 16:10:14 -07007320 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07007321 return 0;
7322
7323 return -EIO;
7324}
7325
7326/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7327 * successfully restored
7328 */
7329static int tg3_test_msi(struct tg3 *tp)
7330{
7331 struct net_device *dev = tp->dev;
7332 int err;
7333 u16 pci_cmd;
7334
7335 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7336 return 0;
7337
7338 /* Turn off SERR reporting in case MSI terminates with Master
7339 * Abort.
7340 */
7341 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7342 pci_write_config_word(tp->pdev, PCI_COMMAND,
7343 pci_cmd & ~PCI_COMMAND_SERR);
7344
7345 err = tg3_test_interrupt(tp);
7346
7347 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7348
7349 if (!err)
7350 return 0;
7351
7352 /* other failures */
7353 if (err != -EIO)
7354 return err;
7355
7356 /* MSI test failed, go back to INTx mode */
7357 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7358 "switching to INTx mode. Please report this failure to "
7359 "the PCI maintainer and include system chipset information.\n",
7360 tp->dev->name);
7361
7362 free_irq(tp->pdev->irq, dev);
7363 pci_disable_msi(tp->pdev);
7364
7365 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7366
Michael Chanfcfa0a32006-03-20 22:28:41 -08007367 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007368 if (err)
7369 return err;
7370
7371 /* Need to reset the chip because the MSI cycle may have terminated
7372 * with Master Abort.
7373 */
David S. Millerf47c11e2005-06-24 20:18:35 -07007374 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07007375
Michael Chan944d9802005-05-29 14:57:48 -07007376 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007377 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07007378
David S. Millerf47c11e2005-06-24 20:18:35 -07007379 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07007380
7381 if (err)
7382 free_irq(tp->pdev->irq, dev);
7383
7384 return err;
7385}
7386
Linus Torvalds1da177e2005-04-16 15:20:36 -07007387static int tg3_open(struct net_device *dev)
7388{
7389 struct tg3 *tp = netdev_priv(dev);
7390 int err;
7391
Michael Chanc49a1562006-12-17 17:07:29 -08007392 netif_carrier_off(tp->dev);
7393
David S. Millerf47c11e2005-06-24 20:18:35 -07007394 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007395
Michael Chanbc1c7562006-03-20 17:48:03 -08007396 err = tg3_set_power_state(tp, PCI_D0);
Ira W. Snyder12862082006-11-21 17:44:31 -08007397 if (err) {
7398 tg3_full_unlock(tp);
Michael Chanbc1c7562006-03-20 17:48:03 -08007399 return err;
Ira W. Snyder12862082006-11-21 17:44:31 -08007400 }
Michael Chanbc1c7562006-03-20 17:48:03 -08007401
Linus Torvalds1da177e2005-04-16 15:20:36 -07007402 tg3_disable_ints(tp);
7403 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7404
David S. Millerf47c11e2005-06-24 20:18:35 -07007405 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007406
7407 /* The placement of this call is tied
7408 * to the setup and use of Host TX descriptors.
7409 */
7410 err = tg3_alloc_consistent(tp);
7411 if (err)
7412 return err;
7413
Michael Chan7544b092007-05-05 13:08:32 -07007414 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07007415 /* All MSI supporting chips should support tagged
7416 * status. Assert that this is the case.
7417 */
7418 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7419 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7420 "Not using MSI.\n", tp->dev->name);
7421 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc22005-04-21 17:13:25 -07007422 u32 msi_mode;
7423
7424 msi_mode = tr32(MSGINT_MODE);
7425 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7426 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7427 }
7428 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08007429 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007430
7431 if (err) {
Michael Chan88b06bc22005-04-21 17:13:25 -07007432 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7433 pci_disable_msi(tp->pdev);
7434 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7435 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007436 tg3_free_consistent(tp);
7437 return err;
7438 }
7439
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007440 napi_enable(&tp->napi);
7441
David S. Millerf47c11e2005-06-24 20:18:35 -07007442 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007443
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007444 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007445 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07007446 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007447 tg3_free_rings(tp);
7448 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07007449 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7450 tp->timer_offset = HZ;
7451 else
7452 tp->timer_offset = HZ / 10;
7453
7454 BUG_ON(tp->timer_offset > HZ);
7455 tp->timer_counter = tp->timer_multiplier =
7456 (HZ / tp->timer_offset);
7457 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07007458 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459
7460 init_timer(&tp->timer);
7461 tp->timer.expires = jiffies + tp->timer_offset;
7462 tp->timer.data = (unsigned long) tp;
7463 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007464 }
7465
David S. Millerf47c11e2005-06-24 20:18:35 -07007466 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007467
7468 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007469 napi_disable(&tp->napi);
Michael Chan88b06bc22005-04-21 17:13:25 -07007470 free_irq(tp->pdev->irq, dev);
7471 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7472 pci_disable_msi(tp->pdev);
7473 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7474 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007475 tg3_free_consistent(tp);
7476 return err;
7477 }
7478
Michael Chan79381092005-04-21 17:13:59 -07007479 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7480 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07007481
Michael Chan79381092005-04-21 17:13:59 -07007482 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07007483 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07007484
7485 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7486 pci_disable_msi(tp->pdev);
7487 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7488 }
Michael Chan944d9802005-05-29 14:57:48 -07007489 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07007490 tg3_free_rings(tp);
7491 tg3_free_consistent(tp);
7492
David S. Millerf47c11e2005-06-24 20:18:35 -07007493 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07007494
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007495 napi_disable(&tp->napi);
7496
Michael Chan79381092005-04-21 17:13:59 -07007497 return err;
7498 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08007499
7500 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7501 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07007502 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08007503
Michael Chanb5d37722006-09-27 16:06:21 -07007504 tw32(PCIE_TRANSACTION_CFG,
7505 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08007506 }
7507 }
Michael Chan79381092005-04-21 17:13:59 -07007508 }
7509
David S. Millerf47c11e2005-06-24 20:18:35 -07007510 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007511
Michael Chan79381092005-04-21 17:13:59 -07007512 add_timer(&tp->timer);
7513 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007514 tg3_enable_ints(tp);
7515
David S. Millerf47c11e2005-06-24 20:18:35 -07007516 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007517
7518 netif_start_queue(dev);
7519
7520 return 0;
7521}
7522
7523#if 0
7524/*static*/ void tg3_dump_state(struct tg3 *tp)
7525{
7526 u32 val32, val32_2, val32_3, val32_4, val32_5;
7527 u16 val16;
7528 int i;
7529
7530 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7531 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7532 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7533 val16, val32);
7534
7535 /* MAC block */
7536 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7537 tr32(MAC_MODE), tr32(MAC_STATUS));
7538 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7539 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7540 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7541 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7542 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7543 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7544
7545 /* Send data initiator control block */
7546 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7547 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7548 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7549 tr32(SNDDATAI_STATSCTRL));
7550
7551 /* Send data completion control block */
7552 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7553
7554 /* Send BD ring selector block */
7555 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7556 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7557
7558 /* Send BD initiator control block */
7559 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7560 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7561
7562 /* Send BD completion control block */
7563 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7564
7565 /* Receive list placement control block */
7566 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7567 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7568 printk(" RCVLPC_STATSCTRL[%08x]\n",
7569 tr32(RCVLPC_STATSCTRL));
7570
7571 /* Receive data and receive BD initiator control block */
7572 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7573 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7574
7575 /* Receive data completion control block */
7576 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7577 tr32(RCVDCC_MODE));
7578
7579 /* Receive BD initiator control block */
7580 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7581 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7582
7583 /* Receive BD completion control block */
7584 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7585 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7586
7587 /* Receive list selector control block */
7588 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7589 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7590
7591 /* Mbuf cluster free block */
7592 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7593 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7594
7595 /* Host coalescing control block */
7596 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7597 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7598 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7599 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7600 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7601 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7602 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7603 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7604 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7605 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7606 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7607 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7608
7609 /* Memory arbiter control block */
7610 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7611 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7612
7613 /* Buffer manager control block */
7614 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7615 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7616 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7617 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7618 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7619 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7620 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7621 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7622
7623 /* Read DMA control block */
7624 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7625 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7626
7627 /* Write DMA control block */
7628 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7629 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7630
7631 /* DMA completion block */
7632 printk("DEBUG: DMAC_MODE[%08x]\n",
7633 tr32(DMAC_MODE));
7634
7635 /* GRC block */
7636 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7637 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7638 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7639 tr32(GRC_LOCAL_CTRL));
7640
7641 /* TG3_BDINFOs */
7642 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7643 tr32(RCVDBDI_JUMBO_BD + 0x0),
7644 tr32(RCVDBDI_JUMBO_BD + 0x4),
7645 tr32(RCVDBDI_JUMBO_BD + 0x8),
7646 tr32(RCVDBDI_JUMBO_BD + 0xc));
7647 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7648 tr32(RCVDBDI_STD_BD + 0x0),
7649 tr32(RCVDBDI_STD_BD + 0x4),
7650 tr32(RCVDBDI_STD_BD + 0x8),
7651 tr32(RCVDBDI_STD_BD + 0xc));
7652 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7653 tr32(RCVDBDI_MINI_BD + 0x0),
7654 tr32(RCVDBDI_MINI_BD + 0x4),
7655 tr32(RCVDBDI_MINI_BD + 0x8),
7656 tr32(RCVDBDI_MINI_BD + 0xc));
7657
7658 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7659 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7660 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7661 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7662 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7663 val32, val32_2, val32_3, val32_4);
7664
7665 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7666 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7667 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7668 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7669 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7670 val32, val32_2, val32_3, val32_4);
7671
7672 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7673 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7674 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7675 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7676 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7677 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7678 val32, val32_2, val32_3, val32_4, val32_5);
7679
7680 /* SW status block */
7681 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7682 tp->hw_status->status,
7683 tp->hw_status->status_tag,
7684 tp->hw_status->rx_jumbo_consumer,
7685 tp->hw_status->rx_consumer,
7686 tp->hw_status->rx_mini_consumer,
7687 tp->hw_status->idx[0].rx_producer,
7688 tp->hw_status->idx[0].tx_consumer);
7689
7690 /* SW statistics block */
7691 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7692 ((u32 *)tp->hw_stats)[0],
7693 ((u32 *)tp->hw_stats)[1],
7694 ((u32 *)tp->hw_stats)[2],
7695 ((u32 *)tp->hw_stats)[3]);
7696
7697 /* Mailboxes */
7698 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07007699 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7700 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7701 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7702 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007703
7704 /* NIC side send descriptors. */
7705 for (i = 0; i < 6; i++) {
7706 unsigned long txd;
7707
7708 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7709 + (i * sizeof(struct tg3_tx_buffer_desc));
7710 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7711 i,
7712 readl(txd + 0x0), readl(txd + 0x4),
7713 readl(txd + 0x8), readl(txd + 0xc));
7714 }
7715
7716 /* NIC side RX descriptors. */
7717 for (i = 0; i < 6; i++) {
7718 unsigned long rxd;
7719
7720 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7721 + (i * sizeof(struct tg3_rx_buffer_desc));
7722 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7723 i,
7724 readl(rxd + 0x0), readl(rxd + 0x4),
7725 readl(rxd + 0x8), readl(rxd + 0xc));
7726 rxd += (4 * sizeof(u32));
7727 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7728 i,
7729 readl(rxd + 0x0), readl(rxd + 0x4),
7730 readl(rxd + 0x8), readl(rxd + 0xc));
7731 }
7732
7733 for (i = 0; i < 6; i++) {
7734 unsigned long rxd;
7735
7736 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7737 + (i * sizeof(struct tg3_rx_buffer_desc));
7738 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7739 i,
7740 readl(rxd + 0x0), readl(rxd + 0x4),
7741 readl(rxd + 0x8), readl(rxd + 0xc));
7742 rxd += (4 * sizeof(u32));
7743 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7744 i,
7745 readl(rxd + 0x0), readl(rxd + 0x4),
7746 readl(rxd + 0x8), readl(rxd + 0xc));
7747 }
7748}
7749#endif
7750
7751static struct net_device_stats *tg3_get_stats(struct net_device *);
7752static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7753
7754static int tg3_close(struct net_device *dev)
7755{
7756 struct tg3 *tp = netdev_priv(dev);
7757
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007758 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07007759 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08007760
Linus Torvalds1da177e2005-04-16 15:20:36 -07007761 netif_stop_queue(dev);
7762
7763 del_timer_sync(&tp->timer);
7764
David S. Millerf47c11e2005-06-24 20:18:35 -07007765 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007766#if 0
7767 tg3_dump_state(tp);
7768#endif
7769
7770 tg3_disable_ints(tp);
7771
Michael Chan944d9802005-05-29 14:57:48 -07007772 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007773 tg3_free_rings(tp);
Michael Chan5cf64b8a2007-05-05 12:11:21 -07007774 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007775
David S. Millerf47c11e2005-06-24 20:18:35 -07007776 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007777
Michael Chan88b06bc22005-04-21 17:13:25 -07007778 free_irq(tp->pdev->irq, dev);
7779 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7780 pci_disable_msi(tp->pdev);
7781 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7782 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007783
7784 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7785 sizeof(tp->net_stats_prev));
7786 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7787 sizeof(tp->estats_prev));
7788
7789 tg3_free_consistent(tp);
7790
Michael Chanbc1c7562006-03-20 17:48:03 -08007791 tg3_set_power_state(tp, PCI_D3hot);
7792
7793 netif_carrier_off(tp->dev);
7794
Linus Torvalds1da177e2005-04-16 15:20:36 -07007795 return 0;
7796}
7797
7798static inline unsigned long get_stat64(tg3_stat64_t *val)
7799{
7800 unsigned long ret;
7801
7802#if (BITS_PER_LONG == 32)
7803 ret = val->low;
7804#else
7805 ret = ((u64)val->high << 32) | ((u64)val->low);
7806#endif
7807 return ret;
7808}
7809
7810static unsigned long calc_crc_errors(struct tg3 *tp)
7811{
7812 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7813
7814 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7815 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007817 u32 val;
7818
David S. Millerf47c11e2005-06-24 20:18:35 -07007819 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08007820 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7821 tg3_writephy(tp, MII_TG3_TEST1,
7822 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007823 tg3_readphy(tp, 0x14, &val);
7824 } else
7825 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07007826 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007827
7828 tp->phy_crc_errors += val;
7829
7830 return tp->phy_crc_errors;
7831 }
7832
7833 return get_stat64(&hw_stats->rx_fcs_errors);
7834}
7835
7836#define ESTAT_ADD(member) \
7837 estats->member = old_estats->member + \
7838 get_stat64(&hw_stats->member)
7839
7840static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7841{
7842 struct tg3_ethtool_stats *estats = &tp->estats;
7843 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7844 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7845
7846 if (!hw_stats)
7847 return old_estats;
7848
7849 ESTAT_ADD(rx_octets);
7850 ESTAT_ADD(rx_fragments);
7851 ESTAT_ADD(rx_ucast_packets);
7852 ESTAT_ADD(rx_mcast_packets);
7853 ESTAT_ADD(rx_bcast_packets);
7854 ESTAT_ADD(rx_fcs_errors);
7855 ESTAT_ADD(rx_align_errors);
7856 ESTAT_ADD(rx_xon_pause_rcvd);
7857 ESTAT_ADD(rx_xoff_pause_rcvd);
7858 ESTAT_ADD(rx_mac_ctrl_rcvd);
7859 ESTAT_ADD(rx_xoff_entered);
7860 ESTAT_ADD(rx_frame_too_long_errors);
7861 ESTAT_ADD(rx_jabbers);
7862 ESTAT_ADD(rx_undersize_packets);
7863 ESTAT_ADD(rx_in_length_errors);
7864 ESTAT_ADD(rx_out_length_errors);
7865 ESTAT_ADD(rx_64_or_less_octet_packets);
7866 ESTAT_ADD(rx_65_to_127_octet_packets);
7867 ESTAT_ADD(rx_128_to_255_octet_packets);
7868 ESTAT_ADD(rx_256_to_511_octet_packets);
7869 ESTAT_ADD(rx_512_to_1023_octet_packets);
7870 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7871 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7872 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7873 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7874 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7875
7876 ESTAT_ADD(tx_octets);
7877 ESTAT_ADD(tx_collisions);
7878 ESTAT_ADD(tx_xon_sent);
7879 ESTAT_ADD(tx_xoff_sent);
7880 ESTAT_ADD(tx_flow_control);
7881 ESTAT_ADD(tx_mac_errors);
7882 ESTAT_ADD(tx_single_collisions);
7883 ESTAT_ADD(tx_mult_collisions);
7884 ESTAT_ADD(tx_deferred);
7885 ESTAT_ADD(tx_excessive_collisions);
7886 ESTAT_ADD(tx_late_collisions);
7887 ESTAT_ADD(tx_collide_2times);
7888 ESTAT_ADD(tx_collide_3times);
7889 ESTAT_ADD(tx_collide_4times);
7890 ESTAT_ADD(tx_collide_5times);
7891 ESTAT_ADD(tx_collide_6times);
7892 ESTAT_ADD(tx_collide_7times);
7893 ESTAT_ADD(tx_collide_8times);
7894 ESTAT_ADD(tx_collide_9times);
7895 ESTAT_ADD(tx_collide_10times);
7896 ESTAT_ADD(tx_collide_11times);
7897 ESTAT_ADD(tx_collide_12times);
7898 ESTAT_ADD(tx_collide_13times);
7899 ESTAT_ADD(tx_collide_14times);
7900 ESTAT_ADD(tx_collide_15times);
7901 ESTAT_ADD(tx_ucast_packets);
7902 ESTAT_ADD(tx_mcast_packets);
7903 ESTAT_ADD(tx_bcast_packets);
7904 ESTAT_ADD(tx_carrier_sense_errors);
7905 ESTAT_ADD(tx_discards);
7906 ESTAT_ADD(tx_errors);
7907
7908 ESTAT_ADD(dma_writeq_full);
7909 ESTAT_ADD(dma_write_prioq_full);
7910 ESTAT_ADD(rxbds_empty);
7911 ESTAT_ADD(rx_discards);
7912 ESTAT_ADD(rx_errors);
7913 ESTAT_ADD(rx_threshold_hit);
7914
7915 ESTAT_ADD(dma_readq_full);
7916 ESTAT_ADD(dma_read_prioq_full);
7917 ESTAT_ADD(tx_comp_queue_full);
7918
7919 ESTAT_ADD(ring_set_send_prod_index);
7920 ESTAT_ADD(ring_status_update);
7921 ESTAT_ADD(nic_irqs);
7922 ESTAT_ADD(nic_avoided_irqs);
7923 ESTAT_ADD(nic_tx_threshold_hit);
7924
7925 return estats;
7926}
7927
7928static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7929{
7930 struct tg3 *tp = netdev_priv(dev);
7931 struct net_device_stats *stats = &tp->net_stats;
7932 struct net_device_stats *old_stats = &tp->net_stats_prev;
7933 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7934
7935 if (!hw_stats)
7936 return old_stats;
7937
7938 stats->rx_packets = old_stats->rx_packets +
7939 get_stat64(&hw_stats->rx_ucast_packets) +
7940 get_stat64(&hw_stats->rx_mcast_packets) +
7941 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04007942
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943 stats->tx_packets = old_stats->tx_packets +
7944 get_stat64(&hw_stats->tx_ucast_packets) +
7945 get_stat64(&hw_stats->tx_mcast_packets) +
7946 get_stat64(&hw_stats->tx_bcast_packets);
7947
7948 stats->rx_bytes = old_stats->rx_bytes +
7949 get_stat64(&hw_stats->rx_octets);
7950 stats->tx_bytes = old_stats->tx_bytes +
7951 get_stat64(&hw_stats->tx_octets);
7952
7953 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07007954 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007955 stats->tx_errors = old_stats->tx_errors +
7956 get_stat64(&hw_stats->tx_errors) +
7957 get_stat64(&hw_stats->tx_mac_errors) +
7958 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7959 get_stat64(&hw_stats->tx_discards);
7960
7961 stats->multicast = old_stats->multicast +
7962 get_stat64(&hw_stats->rx_mcast_packets);
7963 stats->collisions = old_stats->collisions +
7964 get_stat64(&hw_stats->tx_collisions);
7965
7966 stats->rx_length_errors = old_stats->rx_length_errors +
7967 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7968 get_stat64(&hw_stats->rx_undersize_packets);
7969
7970 stats->rx_over_errors = old_stats->rx_over_errors +
7971 get_stat64(&hw_stats->rxbds_empty);
7972 stats->rx_frame_errors = old_stats->rx_frame_errors +
7973 get_stat64(&hw_stats->rx_align_errors);
7974 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7975 get_stat64(&hw_stats->tx_discards);
7976 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7977 get_stat64(&hw_stats->tx_carrier_sense_errors);
7978
7979 stats->rx_crc_errors = old_stats->rx_crc_errors +
7980 calc_crc_errors(tp);
7981
John W. Linville4f63b872005-09-12 14:43:18 -07007982 stats->rx_missed_errors = old_stats->rx_missed_errors +
7983 get_stat64(&hw_stats->rx_discards);
7984
Linus Torvalds1da177e2005-04-16 15:20:36 -07007985 return stats;
7986}
7987
7988static inline u32 calc_crc(unsigned char *buf, int len)
7989{
7990 u32 reg;
7991 u32 tmp;
7992 int j, k;
7993
7994 reg = 0xffffffff;
7995
7996 for (j = 0; j < len; j++) {
7997 reg ^= buf[j];
7998
7999 for (k = 0; k < 8; k++) {
8000 tmp = reg & 0x01;
8001
8002 reg >>= 1;
8003
8004 if (tmp) {
8005 reg ^= 0xedb88320;
8006 }
8007 }
8008 }
8009
8010 return ~reg;
8011}
8012
8013static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8014{
8015 /* accept or reject all multicast frames */
8016 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8017 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8018 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8019 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8020}
8021
8022static void __tg3_set_rx_mode(struct net_device *dev)
8023{
8024 struct tg3 *tp = netdev_priv(dev);
8025 u32 rx_mode;
8026
8027 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8028 RX_MODE_KEEP_VLAN_TAG);
8029
8030 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8031 * flag clear.
8032 */
8033#if TG3_VLAN_TAG_USED
8034 if (!tp->vlgrp &&
8035 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8036 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8037#else
8038 /* By definition, VLAN is disabled always in this
8039 * case.
8040 */
8041 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8042 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8043#endif
8044
8045 if (dev->flags & IFF_PROMISC) {
8046 /* Promiscuous mode. */
8047 rx_mode |= RX_MODE_PROMISC;
8048 } else if (dev->flags & IFF_ALLMULTI) {
8049 /* Accept all multicast. */
8050 tg3_set_multi (tp, 1);
8051 } else if (dev->mc_count < 1) {
8052 /* Reject all multicast. */
8053 tg3_set_multi (tp, 0);
8054 } else {
8055 /* Accept one or more multicast(s). */
8056 struct dev_mc_list *mclist;
8057 unsigned int i;
8058 u32 mc_filter[4] = { 0, };
8059 u32 regidx;
8060 u32 bit;
8061 u32 crc;
8062
8063 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8064 i++, mclist = mclist->next) {
8065
8066 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8067 bit = ~crc & 0x7f;
8068 regidx = (bit & 0x60) >> 5;
8069 bit &= 0x1f;
8070 mc_filter[regidx] |= (1 << bit);
8071 }
8072
8073 tw32(MAC_HASH_REG_0, mc_filter[0]);
8074 tw32(MAC_HASH_REG_1, mc_filter[1]);
8075 tw32(MAC_HASH_REG_2, mc_filter[2]);
8076 tw32(MAC_HASH_REG_3, mc_filter[3]);
8077 }
8078
8079 if (rx_mode != tp->rx_mode) {
8080 tp->rx_mode = rx_mode;
8081 tw32_f(MAC_RX_MODE, rx_mode);
8082 udelay(10);
8083 }
8084}
8085
8086static void tg3_set_rx_mode(struct net_device *dev)
8087{
8088 struct tg3 *tp = netdev_priv(dev);
8089
Michael Chane75f7c92006-03-20 21:33:26 -08008090 if (!netif_running(dev))
8091 return;
8092
David S. Millerf47c11e2005-06-24 20:18:35 -07008093 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008094 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008095 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008096}
8097
8098#define TG3_REGDUMP_LEN (32 * 1024)
8099
8100static int tg3_get_regs_len(struct net_device *dev)
8101{
8102 return TG3_REGDUMP_LEN;
8103}
8104
8105static void tg3_get_regs(struct net_device *dev,
8106 struct ethtool_regs *regs, void *_p)
8107{
8108 u32 *p = _p;
8109 struct tg3 *tp = netdev_priv(dev);
8110 u8 *orig_p = _p;
8111 int i;
8112
8113 regs->version = 0;
8114
8115 memset(p, 0, TG3_REGDUMP_LEN);
8116
Michael Chanbc1c7562006-03-20 17:48:03 -08008117 if (tp->link_config.phy_is_low_power)
8118 return;
8119
David S. Millerf47c11e2005-06-24 20:18:35 -07008120 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008121
8122#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8123#define GET_REG32_LOOP(base,len) \
8124do { p = (u32 *)(orig_p + (base)); \
8125 for (i = 0; i < len; i += 4) \
8126 __GET_REG32((base) + i); \
8127} while (0)
8128#define GET_REG32_1(reg) \
8129do { p = (u32 *)(orig_p + (reg)); \
8130 __GET_REG32((reg)); \
8131} while (0)
8132
8133 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8134 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8135 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8136 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8137 GET_REG32_1(SNDDATAC_MODE);
8138 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8139 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8140 GET_REG32_1(SNDBDC_MODE);
8141 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8142 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8143 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8144 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8145 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8146 GET_REG32_1(RCVDCC_MODE);
8147 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8148 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8149 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8150 GET_REG32_1(MBFREE_MODE);
8151 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8152 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8153 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8154 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8155 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008156 GET_REG32_1(RX_CPU_MODE);
8157 GET_REG32_1(RX_CPU_STATE);
8158 GET_REG32_1(RX_CPU_PGMCTR);
8159 GET_REG32_1(RX_CPU_HWBKPT);
8160 GET_REG32_1(TX_CPU_MODE);
8161 GET_REG32_1(TX_CPU_STATE);
8162 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008163 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8164 GET_REG32_LOOP(FTQ_RESET, 0x120);
8165 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8166 GET_REG32_1(DMAC_MODE);
8167 GET_REG32_LOOP(GRC_MODE, 0x4c);
8168 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8169 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8170
8171#undef __GET_REG32
8172#undef GET_REG32_LOOP
8173#undef GET_REG32_1
8174
David S. Millerf47c11e2005-06-24 20:18:35 -07008175 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008176}
8177
8178static int tg3_get_eeprom_len(struct net_device *dev)
8179{
8180 struct tg3 *tp = netdev_priv(dev);
8181
8182 return tp->nvram_size;
8183}
8184
8185static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008186static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008187static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008188
8189static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8190{
8191 struct tg3 *tp = netdev_priv(dev);
8192 int ret;
8193 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008194 u32 i, offset, len, b_offset, b_count;
8195 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008196
Michael Chanbc1c7562006-03-20 17:48:03 -08008197 if (tp->link_config.phy_is_low_power)
8198 return -EAGAIN;
8199
Linus Torvalds1da177e2005-04-16 15:20:36 -07008200 offset = eeprom->offset;
8201 len = eeprom->len;
8202 eeprom->len = 0;
8203
8204 eeprom->magic = TG3_EEPROM_MAGIC;
8205
8206 if (offset & 3) {
8207 /* adjustments to start on required 4 byte boundary */
8208 b_offset = offset & 3;
8209 b_count = 4 - b_offset;
8210 if (b_count > len) {
8211 /* i.e. offset=1 len=2 */
8212 b_count = len;
8213 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008214 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008215 if (ret)
8216 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008217 memcpy(data, ((char*)&val) + b_offset, b_count);
8218 len -= b_count;
8219 offset += b_count;
8220 eeprom->len += b_count;
8221 }
8222
8223 /* read bytes upto the last 4 byte boundary */
8224 pd = &data[eeprom->len];
8225 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008226 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008227 if (ret) {
8228 eeprom->len += i;
8229 return ret;
8230 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008231 memcpy(pd + i, &val, 4);
8232 }
8233 eeprom->len += i;
8234
8235 if (len & 3) {
8236 /* read last bytes not ending on 4 byte boundary */
8237 pd = &data[eeprom->len];
8238 b_count = len & 3;
8239 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008240 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008241 if (ret)
8242 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008243 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008244 eeprom->len += b_count;
8245 }
8246 return 0;
8247}
8248
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008249static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008250
8251static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8252{
8253 struct tg3 *tp = netdev_priv(dev);
8254 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008255 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008256 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08008257 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008258
Michael Chanbc1c7562006-03-20 17:48:03 -08008259 if (tp->link_config.phy_is_low_power)
8260 return -EAGAIN;
8261
Linus Torvalds1da177e2005-04-16 15:20:36 -07008262 if (eeprom->magic != TG3_EEPROM_MAGIC)
8263 return -EINVAL;
8264
8265 offset = eeprom->offset;
8266 len = eeprom->len;
8267
8268 if ((b_offset = (offset & 3))) {
8269 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08008270 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271 if (ret)
8272 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008273 len += b_offset;
8274 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07008275 if (len < 4)
8276 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008277 }
8278
8279 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07008280 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008281 /* adjustments to end on required 4 byte boundary */
8282 odd_len = 1;
8283 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08008284 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008285 if (ret)
8286 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008287 }
8288
8289 buf = data;
8290 if (b_offset || odd_len) {
8291 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008292 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008293 return -ENOMEM;
8294 if (b_offset)
8295 memcpy(buf, &start, 4);
8296 if (odd_len)
8297 memcpy(buf+len-4, &end, 4);
8298 memcpy(buf + b_offset, data, eeprom->len);
8299 }
8300
8301 ret = tg3_nvram_write_block(tp, offset, len, buf);
8302
8303 if (buf != data)
8304 kfree(buf);
8305
8306 return ret;
8307}
8308
8309static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8310{
8311 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008312
Linus Torvalds1da177e2005-04-16 15:20:36 -07008313 cmd->supported = (SUPPORTED_Autoneg);
8314
8315 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8316 cmd->supported |= (SUPPORTED_1000baseT_Half |
8317 SUPPORTED_1000baseT_Full);
8318
Karsten Keilef348142006-05-12 12:49:08 -07008319 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008320 cmd->supported |= (SUPPORTED_100baseT_Half |
8321 SUPPORTED_100baseT_Full |
8322 SUPPORTED_10baseT_Half |
8323 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08008324 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07008325 cmd->port = PORT_TP;
8326 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008327 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07008328 cmd->port = PORT_FIBRE;
8329 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008330
Linus Torvalds1da177e2005-04-16 15:20:36 -07008331 cmd->advertising = tp->link_config.advertising;
8332 if (netif_running(dev)) {
8333 cmd->speed = tp->link_config.active_speed;
8334 cmd->duplex = tp->link_config.active_duplex;
8335 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008336 cmd->phy_address = PHY_ADDR;
8337 cmd->transceiver = 0;
8338 cmd->autoneg = tp->link_config.autoneg;
8339 cmd->maxtxpkt = 0;
8340 cmd->maxrxpkt = 0;
8341 return 0;
8342}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008343
Linus Torvalds1da177e2005-04-16 15:20:36 -07008344static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8345{
8346 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008347
8348 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008349 /* These are the only valid advertisement bits allowed. */
8350 if (cmd->autoneg == AUTONEG_ENABLE &&
8351 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8352 ADVERTISED_1000baseT_Full |
8353 ADVERTISED_Autoneg |
8354 ADVERTISED_FIBRE)))
8355 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07008356 /* Fiber can only do SPEED_1000. */
8357 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8358 (cmd->speed != SPEED_1000))
8359 return -EINVAL;
8360 /* Copper cannot force SPEED_1000. */
8361 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8362 (cmd->speed == SPEED_1000))
8363 return -EINVAL;
8364 else if ((cmd->speed == SPEED_1000) &&
8365 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8366 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008367
David S. Millerf47c11e2005-06-24 20:18:35 -07008368 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008369
8370 tp->link_config.autoneg = cmd->autoneg;
8371 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07008372 tp->link_config.advertising = (cmd->advertising |
8373 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008374 tp->link_config.speed = SPEED_INVALID;
8375 tp->link_config.duplex = DUPLEX_INVALID;
8376 } else {
8377 tp->link_config.advertising = 0;
8378 tp->link_config.speed = cmd->speed;
8379 tp->link_config.duplex = cmd->duplex;
8380 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008381
Michael Chan24fcad62006-12-17 17:06:46 -08008382 tp->link_config.orig_speed = tp->link_config.speed;
8383 tp->link_config.orig_duplex = tp->link_config.duplex;
8384 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8385
Linus Torvalds1da177e2005-04-16 15:20:36 -07008386 if (netif_running(dev))
8387 tg3_setup_phy(tp, 1);
8388
David S. Millerf47c11e2005-06-24 20:18:35 -07008389 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008390
Linus Torvalds1da177e2005-04-16 15:20:36 -07008391 return 0;
8392}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008393
Linus Torvalds1da177e2005-04-16 15:20:36 -07008394static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8395{
8396 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008397
Linus Torvalds1da177e2005-04-16 15:20:36 -07008398 strcpy(info->driver, DRV_MODULE_NAME);
8399 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08008400 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008401 strcpy(info->bus_info, pci_name(tp->pdev));
8402}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008403
Linus Torvalds1da177e2005-04-16 15:20:36 -07008404static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8405{
8406 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008407
Gary Zambranoa85feb82007-05-05 11:52:19 -07008408 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8409 wol->supported = WAKE_MAGIC;
8410 else
8411 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008412 wol->wolopts = 0;
8413 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8414 wol->wolopts = WAKE_MAGIC;
8415 memset(&wol->sopass, 0, sizeof(wol->sopass));
8416}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008417
Linus Torvalds1da177e2005-04-16 15:20:36 -07008418static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8419{
8420 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008421
Linus Torvalds1da177e2005-04-16 15:20:36 -07008422 if (wol->wolopts & ~WAKE_MAGIC)
8423 return -EINVAL;
8424 if ((wol->wolopts & WAKE_MAGIC) &&
Gary Zambranoa85feb82007-05-05 11:52:19 -07008425 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008426 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008427
David S. Millerf47c11e2005-06-24 20:18:35 -07008428 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008429 if (wol->wolopts & WAKE_MAGIC)
8430 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8431 else
8432 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
David S. Millerf47c11e2005-06-24 20:18:35 -07008433 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008434
Linus Torvalds1da177e2005-04-16 15:20:36 -07008435 return 0;
8436}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008437
Linus Torvalds1da177e2005-04-16 15:20:36 -07008438static u32 tg3_get_msglevel(struct net_device *dev)
8439{
8440 struct tg3 *tp = netdev_priv(dev);
8441 return tp->msg_enable;
8442}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008443
Linus Torvalds1da177e2005-04-16 15:20:36 -07008444static void tg3_set_msglevel(struct net_device *dev, u32 value)
8445{
8446 struct tg3 *tp = netdev_priv(dev);
8447 tp->msg_enable = value;
8448}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008449
Linus Torvalds1da177e2005-04-16 15:20:36 -07008450static int tg3_set_tso(struct net_device *dev, u32 value)
8451{
8452 struct tg3 *tp = netdev_priv(dev);
8453
8454 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8455 if (value)
8456 return -EINVAL;
8457 return 0;
8458 }
Michael Chanb5d37722006-09-27 16:06:21 -07008459 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8460 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07008461 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07008462 dev->features |= NETIF_F_TSO6;
Matt Carlson9936bcf2007-10-10 18:03:07 -07008463 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8464 dev->features |= NETIF_F_TSO_ECN;
8465 } else
8466 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07008467 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008468 return ethtool_op_set_tso(dev, value);
8469}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008470
Linus Torvalds1da177e2005-04-16 15:20:36 -07008471static int tg3_nway_reset(struct net_device *dev)
8472{
8473 struct tg3 *tp = netdev_priv(dev);
8474 u32 bmcr;
8475 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008476
Linus Torvalds1da177e2005-04-16 15:20:36 -07008477 if (!netif_running(dev))
8478 return -EAGAIN;
8479
Michael Chanc94e3942005-09-27 12:12:42 -07008480 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8481 return -EINVAL;
8482
David S. Millerf47c11e2005-06-24 20:18:35 -07008483 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484 r = -EINVAL;
8485 tg3_readphy(tp, MII_BMCR, &bmcr);
8486 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
Michael Chanc94e3942005-09-27 12:12:42 -07008487 ((bmcr & BMCR_ANENABLE) ||
8488 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8489 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8490 BMCR_ANENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008491 r = 0;
8492 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008493 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008494
Linus Torvalds1da177e2005-04-16 15:20:36 -07008495 return r;
8496}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008497
Linus Torvalds1da177e2005-04-16 15:20:36 -07008498static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8499{
8500 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008501
Linus Torvalds1da177e2005-04-16 15:20:36 -07008502 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8503 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08008504 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8505 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8506 else
8507 ering->rx_jumbo_max_pending = 0;
8508
8509 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008510
8511 ering->rx_pending = tp->rx_pending;
8512 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08008513 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8514 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8515 else
8516 ering->rx_jumbo_pending = 0;
8517
Linus Torvalds1da177e2005-04-16 15:20:36 -07008518 ering->tx_pending = tp->tx_pending;
8519}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008520
Linus Torvalds1da177e2005-04-16 15:20:36 -07008521static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8522{
8523 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008524 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008525
Linus Torvalds1da177e2005-04-16 15:20:36 -07008526 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8527 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07008528 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8529 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08008530 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07008531 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008532 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008533
Michael Chanbbe832c2005-06-24 20:20:04 -07008534 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008535 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07008536 irq_sync = 1;
8537 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008538
Michael Chanbbe832c2005-06-24 20:20:04 -07008539 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008540
Linus Torvalds1da177e2005-04-16 15:20:36 -07008541 tp->rx_pending = ering->rx_pending;
8542
8543 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8544 tp->rx_pending > 63)
8545 tp->rx_pending = 63;
8546 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8547 tp->tx_pending = ering->tx_pending;
8548
8549 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07008550 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008551 err = tg3_restart_hw(tp, 1);
8552 if (!err)
8553 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008554 }
8555
David S. Millerf47c11e2005-06-24 20:18:35 -07008556 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008557
Michael Chanb9ec6c12006-07-25 16:37:27 -07008558 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008559}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008560
Linus Torvalds1da177e2005-04-16 15:20:36 -07008561static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8562{
8563 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008564
Linus Torvalds1da177e2005-04-16 15:20:36 -07008565 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08008566
8567 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8568 epause->rx_pause = 1;
8569 else
8570 epause->rx_pause = 0;
8571
8572 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8573 epause->tx_pause = 1;
8574 else
8575 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008576}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008577
Linus Torvalds1da177e2005-04-16 15:20:36 -07008578static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8579{
8580 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008581 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008582
Michael Chanbbe832c2005-06-24 20:20:04 -07008583 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008584 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07008585 irq_sync = 1;
8586 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008587
Michael Chanbbe832c2005-06-24 20:20:04 -07008588 tg3_full_lock(tp, irq_sync);
David S. Millerf47c11e2005-06-24 20:18:35 -07008589
Linus Torvalds1da177e2005-04-16 15:20:36 -07008590 if (epause->autoneg)
8591 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8592 else
8593 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8594 if (epause->rx_pause)
Matt Carlson8d018622007-12-20 20:05:44 -08008595 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008596 else
Matt Carlson8d018622007-12-20 20:05:44 -08008597 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008598 if (epause->tx_pause)
Matt Carlson8d018622007-12-20 20:05:44 -08008599 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008600 else
Matt Carlson8d018622007-12-20 20:05:44 -08008601 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008602
8603 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07008604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008605 err = tg3_restart_hw(tp, 1);
8606 if (!err)
8607 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008608 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008609
8610 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008611
Michael Chanb9ec6c12006-07-25 16:37:27 -07008612 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008613}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008614
Linus Torvalds1da177e2005-04-16 15:20:36 -07008615static u32 tg3_get_rx_csum(struct net_device *dev)
8616{
8617 struct tg3 *tp = netdev_priv(dev);
8618 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8619}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008620
Linus Torvalds1da177e2005-04-16 15:20:36 -07008621static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8622{
8623 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008624
Linus Torvalds1da177e2005-04-16 15:20:36 -07008625 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8626 if (data != 0)
8627 return -EINVAL;
8628 return 0;
8629 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008630
David S. Millerf47c11e2005-06-24 20:18:35 -07008631 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008632 if (data)
8633 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8634 else
8635 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07008636 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008637
Linus Torvalds1da177e2005-04-16 15:20:36 -07008638 return 0;
8639}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008640
Linus Torvalds1da177e2005-04-16 15:20:36 -07008641static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8642{
8643 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008644
Linus Torvalds1da177e2005-04-16 15:20:36 -07008645 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8646 if (data != 0)
8647 return -EINVAL;
8648 return 0;
8649 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008650
Michael Chanaf36e6b2006-03-23 01:28:06 -08008651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07008652 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07008653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chan6460d942007-07-14 19:07:52 -07008655 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008656 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08008657 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008658
8659 return 0;
8660}
8661
Jeff Garzikb9f2c042007-10-03 18:07:32 -07008662static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008663{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07008664 switch (sset) {
8665 case ETH_SS_TEST:
8666 return TG3_NUM_TEST;
8667 case ETH_SS_STATS:
8668 return TG3_NUM_STATS;
8669 default:
8670 return -EOPNOTSUPP;
8671 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07008672}
8673
Linus Torvalds1da177e2005-04-16 15:20:36 -07008674static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8675{
8676 switch (stringset) {
8677 case ETH_SS_STATS:
8678 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8679 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07008680 case ETH_SS_TEST:
8681 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8682 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008683 default:
8684 WARN_ON(1); /* we need a WARN() */
8685 break;
8686 }
8687}
8688
Michael Chan4009a932005-09-05 17:52:54 -07008689static int tg3_phys_id(struct net_device *dev, u32 data)
8690{
8691 struct tg3 *tp = netdev_priv(dev);
8692 int i;
8693
8694 if (!netif_running(tp->dev))
8695 return -EAGAIN;
8696
8697 if (data == 0)
8698 data = 2;
8699
8700 for (i = 0; i < (data * 2); i++) {
8701 if ((i % 2) == 0)
8702 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8703 LED_CTRL_1000MBPS_ON |
8704 LED_CTRL_100MBPS_ON |
8705 LED_CTRL_10MBPS_ON |
8706 LED_CTRL_TRAFFIC_OVERRIDE |
8707 LED_CTRL_TRAFFIC_BLINK |
8708 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008709
Michael Chan4009a932005-09-05 17:52:54 -07008710 else
8711 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8712 LED_CTRL_TRAFFIC_OVERRIDE);
8713
8714 if (msleep_interruptible(500))
8715 break;
8716 }
8717 tw32(MAC_LED_CTRL, tp->led_ctrl);
8718 return 0;
8719}
8720
Linus Torvalds1da177e2005-04-16 15:20:36 -07008721static void tg3_get_ethtool_stats (struct net_device *dev,
8722 struct ethtool_stats *estats, u64 *tmp_stats)
8723{
8724 struct tg3 *tp = netdev_priv(dev);
8725 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8726}
8727
Michael Chan566f86a2005-05-29 14:56:58 -07008728#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08008729#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8730#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8731#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07008732#define NVRAM_SELFBOOT_HW_SIZE 0x20
8733#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07008734
8735static int tg3_test_nvram(struct tg3 *tp)
8736{
Al Virob9fc7dc2007-12-17 22:59:57 -08008737 u32 csum, magic;
8738 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008739 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07008740
Michael Chan18201802006-03-20 22:29:15 -08008741 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08008742 return -EIO;
8743
Michael Chan1b277772006-03-20 22:27:48 -08008744 if (magic == TG3_EEPROM_MAGIC)
8745 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07008746 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08008747 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8748 TG3_EEPROM_SB_FORMAT_1) {
8749 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8750 case TG3_EEPROM_SB_REVISION_0:
8751 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
8752 break;
8753 case TG3_EEPROM_SB_REVISION_2:
8754 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
8755 break;
8756 case TG3_EEPROM_SB_REVISION_3:
8757 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
8758 break;
8759 default:
8760 return 0;
8761 }
8762 } else
Michael Chan1b277772006-03-20 22:27:48 -08008763 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07008764 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8765 size = NVRAM_SELFBOOT_HW_SIZE;
8766 else
Michael Chan1b277772006-03-20 22:27:48 -08008767 return -EIO;
8768
8769 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07008770 if (buf == NULL)
8771 return -ENOMEM;
8772
Michael Chan1b277772006-03-20 22:27:48 -08008773 err = -EIO;
8774 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008775 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07008776 break;
Michael Chan566f86a2005-05-29 14:56:58 -07008777 }
Michael Chan1b277772006-03-20 22:27:48 -08008778 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07008779 goto out;
8780
Michael Chan1b277772006-03-20 22:27:48 -08008781 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08008782 magic = swab32(le32_to_cpu(buf[0]));
8783 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07008784 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08008785 u8 *buf8 = (u8 *) buf, csum8 = 0;
8786
Al Virob9fc7dc2007-12-17 22:59:57 -08008787 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08008788 TG3_EEPROM_SB_REVISION_2) {
8789 /* For rev 2, the csum doesn't include the MBA. */
8790 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
8791 csum8 += buf8[i];
8792 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
8793 csum8 += buf8[i];
8794 } else {
8795 for (i = 0; i < size; i++)
8796 csum8 += buf8[i];
8797 }
Michael Chan1b277772006-03-20 22:27:48 -08008798
Adrian Bunkad96b482006-04-05 22:21:04 -07008799 if (csum8 == 0) {
8800 err = 0;
8801 goto out;
8802 }
8803
8804 err = -EIO;
8805 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08008806 }
Michael Chan566f86a2005-05-29 14:56:58 -07008807
Al Virob9fc7dc2007-12-17 22:59:57 -08008808 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07008809 TG3_EEPROM_MAGIC_HW) {
8810 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8811 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8812 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07008813
8814 /* Separate the parity bits and the data bytes. */
8815 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8816 if ((i == 0) || (i == 8)) {
8817 int l;
8818 u8 msk;
8819
8820 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8821 parity[k++] = buf8[i] & msk;
8822 i++;
8823 }
8824 else if (i == 16) {
8825 int l;
8826 u8 msk;
8827
8828 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8829 parity[k++] = buf8[i] & msk;
8830 i++;
8831
8832 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8833 parity[k++] = buf8[i] & msk;
8834 i++;
8835 }
8836 data[j++] = buf8[i];
8837 }
8838
8839 err = -EIO;
8840 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8841 u8 hw8 = hweight8(data[i]);
8842
8843 if ((hw8 & 0x1) && parity[i])
8844 goto out;
8845 else if (!(hw8 & 0x1) && !parity[i])
8846 goto out;
8847 }
8848 err = 0;
8849 goto out;
8850 }
8851
Michael Chan566f86a2005-05-29 14:56:58 -07008852 /* Bootstrap checksum at offset 0x10 */
8853 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08008854 if(csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07008855 goto out;
8856
8857 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8858 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08008859 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07008860 goto out;
8861
8862 err = 0;
8863
8864out:
8865 kfree(buf);
8866 return err;
8867}
8868
Michael Chanca430072005-05-29 14:57:23 -07008869#define TG3_SERDES_TIMEOUT_SEC 2
8870#define TG3_COPPER_TIMEOUT_SEC 6
8871
8872static int tg3_test_link(struct tg3 *tp)
8873{
8874 int i, max;
8875
8876 if (!netif_running(tp->dev))
8877 return -ENODEV;
8878
Michael Chan4c987482005-09-05 17:52:38 -07008879 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07008880 max = TG3_SERDES_TIMEOUT_SEC;
8881 else
8882 max = TG3_COPPER_TIMEOUT_SEC;
8883
8884 for (i = 0; i < max; i++) {
8885 if (netif_carrier_ok(tp->dev))
8886 return 0;
8887
8888 if (msleep_interruptible(1000))
8889 break;
8890 }
8891
8892 return -EIO;
8893}
8894
Michael Chana71116d2005-05-29 14:58:11 -07008895/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08008896static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07008897{
Michael Chanb16250e2006-09-27 16:10:14 -07008898 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07008899 u32 offset, read_mask, write_mask, val, save_val, read_val;
8900 static struct {
8901 u16 offset;
8902 u16 flags;
8903#define TG3_FL_5705 0x1
8904#define TG3_FL_NOT_5705 0x2
8905#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07008906#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07008907 u32 read_mask;
8908 u32 write_mask;
8909 } reg_tbl[] = {
8910 /* MAC Control Registers */
8911 { MAC_MODE, TG3_FL_NOT_5705,
8912 0x00000000, 0x00ef6f8c },
8913 { MAC_MODE, TG3_FL_5705,
8914 0x00000000, 0x01ef6b8c },
8915 { MAC_STATUS, TG3_FL_NOT_5705,
8916 0x03800107, 0x00000000 },
8917 { MAC_STATUS, TG3_FL_5705,
8918 0x03800100, 0x00000000 },
8919 { MAC_ADDR_0_HIGH, 0x0000,
8920 0x00000000, 0x0000ffff },
8921 { MAC_ADDR_0_LOW, 0x0000,
8922 0x00000000, 0xffffffff },
8923 { MAC_RX_MTU_SIZE, 0x0000,
8924 0x00000000, 0x0000ffff },
8925 { MAC_TX_MODE, 0x0000,
8926 0x00000000, 0x00000070 },
8927 { MAC_TX_LENGTHS, 0x0000,
8928 0x00000000, 0x00003fff },
8929 { MAC_RX_MODE, TG3_FL_NOT_5705,
8930 0x00000000, 0x000007fc },
8931 { MAC_RX_MODE, TG3_FL_5705,
8932 0x00000000, 0x000007dc },
8933 { MAC_HASH_REG_0, 0x0000,
8934 0x00000000, 0xffffffff },
8935 { MAC_HASH_REG_1, 0x0000,
8936 0x00000000, 0xffffffff },
8937 { MAC_HASH_REG_2, 0x0000,
8938 0x00000000, 0xffffffff },
8939 { MAC_HASH_REG_3, 0x0000,
8940 0x00000000, 0xffffffff },
8941
8942 /* Receive Data and Receive BD Initiator Control Registers. */
8943 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8944 0x00000000, 0xffffffff },
8945 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8946 0x00000000, 0xffffffff },
8947 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8948 0x00000000, 0x00000003 },
8949 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8950 0x00000000, 0xffffffff },
8951 { RCVDBDI_STD_BD+0, 0x0000,
8952 0x00000000, 0xffffffff },
8953 { RCVDBDI_STD_BD+4, 0x0000,
8954 0x00000000, 0xffffffff },
8955 { RCVDBDI_STD_BD+8, 0x0000,
8956 0x00000000, 0xffff0002 },
8957 { RCVDBDI_STD_BD+0xc, 0x0000,
8958 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008959
Michael Chana71116d2005-05-29 14:58:11 -07008960 /* Receive BD Initiator Control Registers. */
8961 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8962 0x00000000, 0xffffffff },
8963 { RCVBDI_STD_THRESH, TG3_FL_5705,
8964 0x00000000, 0x000003ff },
8965 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8966 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008967
Michael Chana71116d2005-05-29 14:58:11 -07008968 /* Host Coalescing Control Registers. */
8969 { HOSTCC_MODE, TG3_FL_NOT_5705,
8970 0x00000000, 0x00000004 },
8971 { HOSTCC_MODE, TG3_FL_5705,
8972 0x00000000, 0x000000f6 },
8973 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8974 0x00000000, 0xffffffff },
8975 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8976 0x00000000, 0x000003ff },
8977 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8978 0x00000000, 0xffffffff },
8979 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8980 0x00000000, 0x000003ff },
8981 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8982 0x00000000, 0xffffffff },
8983 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8984 0x00000000, 0x000000ff },
8985 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8986 0x00000000, 0xffffffff },
8987 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8988 0x00000000, 0x000000ff },
8989 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8990 0x00000000, 0xffffffff },
8991 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8992 0x00000000, 0xffffffff },
8993 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8994 0x00000000, 0xffffffff },
8995 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8996 0x00000000, 0x000000ff },
8997 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8998 0x00000000, 0xffffffff },
8999 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9000 0x00000000, 0x000000ff },
9001 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9002 0x00000000, 0xffffffff },
9003 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9004 0x00000000, 0xffffffff },
9005 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9006 0x00000000, 0xffffffff },
9007 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9008 0x00000000, 0xffffffff },
9009 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9010 0x00000000, 0xffffffff },
9011 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9012 0xffffffff, 0x00000000 },
9013 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9014 0xffffffff, 0x00000000 },
9015
9016 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009017 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009018 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009019 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009020 0x00000000, 0x007fffff },
9021 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9022 0x00000000, 0x0000003f },
9023 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9024 0x00000000, 0x000001ff },
9025 { BUFMGR_MB_HIGH_WATER, 0x0000,
9026 0x00000000, 0x000001ff },
9027 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9028 0xffffffff, 0x00000000 },
9029 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9030 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009031
Michael Chana71116d2005-05-29 14:58:11 -07009032 /* Mailbox Registers */
9033 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9034 0x00000000, 0x000001ff },
9035 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9036 0x00000000, 0x000001ff },
9037 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9038 0x00000000, 0x000007ff },
9039 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9040 0x00000000, 0x000001ff },
9041
9042 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9043 };
9044
Michael Chanb16250e2006-09-27 16:10:14 -07009045 is_5705 = is_5750 = 0;
9046 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009047 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009048 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9049 is_5750 = 1;
9050 }
Michael Chana71116d2005-05-29 14:58:11 -07009051
9052 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9053 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9054 continue;
9055
9056 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9057 continue;
9058
9059 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9060 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9061 continue;
9062
Michael Chanb16250e2006-09-27 16:10:14 -07009063 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9064 continue;
9065
Michael Chana71116d2005-05-29 14:58:11 -07009066 offset = (u32) reg_tbl[i].offset;
9067 read_mask = reg_tbl[i].read_mask;
9068 write_mask = reg_tbl[i].write_mask;
9069
9070 /* Save the original register content */
9071 save_val = tr32(offset);
9072
9073 /* Determine the read-only value. */
9074 read_val = save_val & read_mask;
9075
9076 /* Write zero to the register, then make sure the read-only bits
9077 * are not changed and the read/write bits are all zeros.
9078 */
9079 tw32(offset, 0);
9080
9081 val = tr32(offset);
9082
9083 /* Test the read-only and read/write bits. */
9084 if (((val & read_mask) != read_val) || (val & write_mask))
9085 goto out;
9086
9087 /* Write ones to all the bits defined by RdMask and WrMask, then
9088 * make sure the read-only bits are not changed and the
9089 * read/write bits are all ones.
9090 */
9091 tw32(offset, read_mask | write_mask);
9092
9093 val = tr32(offset);
9094
9095 /* Test the read-only bits. */
9096 if ((val & read_mask) != read_val)
9097 goto out;
9098
9099 /* Test the read/write bits. */
9100 if ((val & write_mask) != write_mask)
9101 goto out;
9102
9103 tw32(offset, save_val);
9104 }
9105
9106 return 0;
9107
9108out:
Michael Chan9f88f292006-12-07 00:22:54 -08009109 if (netif_msg_hw(tp))
9110 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9111 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009112 tw32(offset, save_val);
9113 return -EIO;
9114}
9115
Michael Chan7942e1d2005-05-29 14:58:36 -07009116static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9117{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009118 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009119 int i;
9120 u32 j;
9121
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009122 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009123 for (j = 0; j < len; j += 4) {
9124 u32 val;
9125
9126 tg3_write_mem(tp, offset + j, test_pattern[i]);
9127 tg3_read_mem(tp, offset + j, &val);
9128 if (val != test_pattern[i])
9129 return -EIO;
9130 }
9131 }
9132 return 0;
9133}
9134
9135static int tg3_test_memory(struct tg3 *tp)
9136{
9137 static struct mem_entry {
9138 u32 offset;
9139 u32 len;
9140 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009141 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009142 { 0x00002000, 0x1c000},
9143 { 0xffffffff, 0x00000}
9144 }, mem_tbl_5705[] = {
9145 { 0x00000100, 0x0000c},
9146 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009147 { 0x00004000, 0x00800},
9148 { 0x00006000, 0x01000},
9149 { 0x00008000, 0x02000},
9150 { 0x00010000, 0x0e000},
9151 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009152 }, mem_tbl_5755[] = {
9153 { 0x00000200, 0x00008},
9154 { 0x00004000, 0x00800},
9155 { 0x00006000, 0x00800},
9156 { 0x00008000, 0x02000},
9157 { 0x00010000, 0x0c000},
9158 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009159 }, mem_tbl_5906[] = {
9160 { 0x00000200, 0x00008},
9161 { 0x00004000, 0x00400},
9162 { 0x00006000, 0x00400},
9163 { 0x00008000, 0x01000},
9164 { 0x00010000, 0x01000},
9165 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009166 };
9167 struct mem_entry *mem_tbl;
9168 int err = 0;
9169 int i;
9170
Michael Chan79f4d132006-03-20 22:28:57 -08009171 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08009172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chan79f4d132006-03-20 22:28:57 -08009176 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -07009177 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9178 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -08009179 else
9180 mem_tbl = mem_tbl_5705;
9181 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07009182 mem_tbl = mem_tbl_570x;
9183
9184 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9185 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9186 mem_tbl[i].len)) != 0)
9187 break;
9188 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009189
Michael Chan7942e1d2005-05-29 14:58:36 -07009190 return err;
9191}
9192
Michael Chan9f40dea2005-09-05 17:53:06 -07009193#define TG3_MAC_LOOPBACK 0
9194#define TG3_PHY_LOOPBACK 1
9195
9196static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07009197{
Michael Chan9f40dea2005-09-05 17:53:06 -07009198 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07009199 u32 desc_idx;
9200 struct sk_buff *skb, *rx_skb;
9201 u8 *tx_data;
9202 dma_addr_t map;
9203 int num_pkts, tx_len, rx_len, i, err;
9204 struct tg3_rx_buffer_desc *desc;
9205
Michael Chan9f40dea2005-09-05 17:53:06 -07009206 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07009207 /* HW errata - mac loopback fails in some cases on 5780.
9208 * Normal traffic and PHY loopback are not affected by
9209 * errata.
9210 */
9211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9212 return 0;
9213
Michael Chan9f40dea2005-09-05 17:53:06 -07009214 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009215 MAC_MODE_PORT_INT_LPBACK;
9216 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9217 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -07009218 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9219 mac_mode |= MAC_MODE_PORT_MODE_MII;
9220 else
9221 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -07009222 tw32(MAC_MODE, mac_mode);
9223 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -07009224 u32 val;
9225
Michael Chanb16250e2006-09-27 16:10:14 -07009226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9227 u32 phytest;
9228
9229 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9230 u32 phy;
9231
9232 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9233 phytest | MII_TG3_EPHY_SHADOW_EN);
9234 if (!tg3_readphy(tp, 0x1b, &phy))
9235 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -07009236 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9237 }
Michael Chan5d64ad32006-12-07 00:19:40 -08009238 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9239 } else
9240 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -07009241
Matt Carlson9ef8ca92007-07-11 19:48:29 -07009242 tg3_phy_toggle_automdix(tp, 0);
9243
Michael Chan3f7045c2006-09-27 16:02:29 -07009244 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -07009245 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -08009246
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009247 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -08009248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -07009249 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -08009250 mac_mode |= MAC_MODE_PORT_MODE_MII;
9251 } else
9252 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -07009253
Michael Chanc94e3942005-09-27 12:12:42 -07009254 /* reset to prevent losing 1st rx packet intermittently */
9255 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9256 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9257 udelay(10);
9258 tw32_f(MAC_RX_MODE, tp->rx_mode);
9259 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9261 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9262 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9263 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9264 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -08009265 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9266 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9267 }
Michael Chan9f40dea2005-09-05 17:53:06 -07009268 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -07009269 }
9270 else
9271 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -07009272
9273 err = -EIO;
9274
Michael Chanc76949a2005-05-29 14:58:59 -07009275 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -07009276 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -07009277 if (!skb)
9278 return -ENOMEM;
9279
Michael Chanc76949a2005-05-29 14:58:59 -07009280 tx_data = skb_put(skb, tx_len);
9281 memcpy(tx_data, tp->dev->dev_addr, 6);
9282 memset(tx_data + 6, 0x0, 8);
9283
9284 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9285
9286 for (i = 14; i < tx_len; i++)
9287 tx_data[i] = (u8) (i & 0xff);
9288
9289 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9290
9291 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9292 HOSTCC_MODE_NOW);
9293
9294 udelay(10);
9295
9296 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9297
Michael Chanc76949a2005-05-29 14:58:59 -07009298 num_pkts = 0;
9299
Michael Chan9f40dea2005-09-05 17:53:06 -07009300 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -07009301
Michael Chan9f40dea2005-09-05 17:53:06 -07009302 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -07009303 num_pkts++;
9304
Michael Chan9f40dea2005-09-05 17:53:06 -07009305 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9306 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -07009307 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -07009308
9309 udelay(10);
9310
Michael Chan3f7045c2006-09-27 16:02:29 -07009311 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9312 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -07009313 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9314 HOSTCC_MODE_NOW);
9315
9316 udelay(10);
9317
9318 tx_idx = tp->hw_status->idx[0].tx_consumer;
9319 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -07009320 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -07009321 (rx_idx == (rx_start_idx + num_pkts)))
9322 break;
9323 }
9324
9325 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9326 dev_kfree_skb(skb);
9327
Michael Chan9f40dea2005-09-05 17:53:06 -07009328 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -07009329 goto out;
9330
9331 if (rx_idx != rx_start_idx + num_pkts)
9332 goto out;
9333
9334 desc = &tp->rx_rcb[rx_start_idx];
9335 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9336 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9337 if (opaque_key != RXD_OPAQUE_RING_STD)
9338 goto out;
9339
9340 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9341 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9342 goto out;
9343
9344 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9345 if (rx_len != tx_len)
9346 goto out;
9347
9348 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9349
9350 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9351 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9352
9353 for (i = 14; i < tx_len; i++) {
9354 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9355 goto out;
9356 }
9357 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009358
Michael Chanc76949a2005-05-29 14:58:59 -07009359 /* tg3_free_rings will unmap and free the rx_skb */
9360out:
9361 return err;
9362}
9363
Michael Chan9f40dea2005-09-05 17:53:06 -07009364#define TG3_MAC_LOOPBACK_FAILED 1
9365#define TG3_PHY_LOOPBACK_FAILED 2
9366#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9367 TG3_PHY_LOOPBACK_FAILED)
9368
9369static int tg3_test_loopback(struct tg3 *tp)
9370{
9371 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -07009372 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -07009373
9374 if (!netif_running(tp->dev))
9375 return TG3_LOOPBACK_FAILED;
9376
Michael Chanb9ec6c12006-07-25 16:37:27 -07009377 err = tg3_reset_hw(tp, 1);
9378 if (err)
9379 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -07009380
Matt Carlsonb5af7122007-11-12 21:22:02 -08009381 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009382 int i;
9383 u32 status;
9384
9385 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9386
9387 /* Wait for up to 40 microseconds to acquire lock. */
9388 for (i = 0; i < 4; i++) {
9389 status = tr32(TG3_CPMU_MUTEX_GNT);
9390 if (status == CPMU_MUTEX_GNT_DRIVER)
9391 break;
9392 udelay(10);
9393 }
9394
9395 if (status != CPMU_MUTEX_GNT_DRIVER)
9396 return TG3_LOOPBACK_FAILED;
9397
Matt Carlson9936bcf2007-10-10 18:03:07 -07009398 /* Turn off power management based on link speed. */
Matt Carlsone8750932007-11-12 21:11:51 -08009399 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson9936bcf2007-10-10 18:03:07 -07009400 tw32(TG3_CPMU_CTRL,
Matt Carlsone8750932007-11-12 21:11:51 -08009401 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9402 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -07009403 }
9404
Michael Chan9f40dea2005-09-05 17:53:06 -07009405 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9406 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -07009407
Matt Carlsonb5af7122007-11-12 21:22:02 -08009408 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009409 tw32(TG3_CPMU_CTRL, cpmuctrl);
9410
9411 /* Release the mutex */
9412 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9413 }
9414
Michael Chan9f40dea2005-09-05 17:53:06 -07009415 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9416 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9417 err |= TG3_PHY_LOOPBACK_FAILED;
9418 }
9419
9420 return err;
9421}
9422
Michael Chan4cafd3f2005-05-29 14:56:34 -07009423static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9424 u64 *data)
9425{
Michael Chan566f86a2005-05-29 14:56:58 -07009426 struct tg3 *tp = netdev_priv(dev);
9427
Michael Chanbc1c7562006-03-20 17:48:03 -08009428 if (tp->link_config.phy_is_low_power)
9429 tg3_set_power_state(tp, PCI_D0);
9430
Michael Chan566f86a2005-05-29 14:56:58 -07009431 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9432
9433 if (tg3_test_nvram(tp) != 0) {
9434 etest->flags |= ETH_TEST_FL_FAILED;
9435 data[0] = 1;
9436 }
Michael Chanca430072005-05-29 14:57:23 -07009437 if (tg3_test_link(tp) != 0) {
9438 etest->flags |= ETH_TEST_FL_FAILED;
9439 data[1] = 1;
9440 }
Michael Chana71116d2005-05-29 14:58:11 -07009441 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Michael Chanec41c7d2006-01-17 02:40:55 -08009442 int err, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -07009443
Michael Chanbbe832c2005-06-24 20:20:04 -07009444 if (netif_running(dev)) {
9445 tg3_netif_stop(tp);
9446 irq_sync = 1;
9447 }
9448
9449 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -07009450
9451 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -08009452 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009453 tg3_halt_cpu(tp, RX_CPU_BASE);
9454 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9455 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08009456 if (!err)
9457 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009458
Michael Chand9ab5ad12006-03-20 22:27:35 -08009459 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9460 tg3_phy_reset(tp);
9461
Michael Chana71116d2005-05-29 14:58:11 -07009462 if (tg3_test_registers(tp) != 0) {
9463 etest->flags |= ETH_TEST_FL_FAILED;
9464 data[2] = 1;
9465 }
Michael Chan7942e1d2005-05-29 14:58:36 -07009466 if (tg3_test_memory(tp) != 0) {
9467 etest->flags |= ETH_TEST_FL_FAILED;
9468 data[3] = 1;
9469 }
Michael Chan9f40dea2005-09-05 17:53:06 -07009470 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -07009471 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -07009472
David S. Millerf47c11e2005-06-24 20:18:35 -07009473 tg3_full_unlock(tp);
9474
Michael Chand4bc3922005-05-29 14:59:20 -07009475 if (tg3_test_interrupt(tp) != 0) {
9476 etest->flags |= ETH_TEST_FL_FAILED;
9477 data[5] = 1;
9478 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009479
9480 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -07009481
Michael Chana71116d2005-05-29 14:58:11 -07009482 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9483 if (netif_running(dev)) {
9484 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -07009485 if (!tg3_restart_hw(tp, 1))
9486 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009487 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009488
9489 tg3_full_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009490 }
Michael Chanbc1c7562006-03-20 17:48:03 -08009491 if (tp->link_config.phy_is_low_power)
9492 tg3_set_power_state(tp, PCI_D3hot);
9493
Michael Chan4cafd3f2005-05-29 14:56:34 -07009494}
9495
Linus Torvalds1da177e2005-04-16 15:20:36 -07009496static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9497{
9498 struct mii_ioctl_data *data = if_mii(ifr);
9499 struct tg3 *tp = netdev_priv(dev);
9500 int err;
9501
9502 switch(cmd) {
9503 case SIOCGMIIPHY:
9504 data->phy_id = PHY_ADDR;
9505
9506 /* fallthru */
9507 case SIOCGMIIREG: {
9508 u32 mii_regval;
9509
9510 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9511 break; /* We have no PHY */
9512
Michael Chanbc1c7562006-03-20 17:48:03 -08009513 if (tp->link_config.phy_is_low_power)
9514 return -EAGAIN;
9515
David S. Millerf47c11e2005-06-24 20:18:35 -07009516 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009517 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -07009518 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009519
9520 data->val_out = mii_regval;
9521
9522 return err;
9523 }
9524
9525 case SIOCSMIIREG:
9526 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9527 break; /* We have no PHY */
9528
9529 if (!capable(CAP_NET_ADMIN))
9530 return -EPERM;
9531
Michael Chanbc1c7562006-03-20 17:48:03 -08009532 if (tp->link_config.phy_is_low_power)
9533 return -EAGAIN;
9534
David S. Millerf47c11e2005-06-24 20:18:35 -07009535 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009536 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -07009537 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009538
9539 return err;
9540
9541 default:
9542 /* do nothing */
9543 break;
9544 }
9545 return -EOPNOTSUPP;
9546}
9547
9548#if TG3_VLAN_TAG_USED
9549static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9550{
9551 struct tg3 *tp = netdev_priv(dev);
9552
Michael Chan29315e82006-06-29 20:12:30 -07009553 if (netif_running(dev))
9554 tg3_netif_stop(tp);
9555
David S. Millerf47c11e2005-06-24 20:18:35 -07009556 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009557
9558 tp->vlgrp = grp;
9559
9560 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9561 __tg3_set_rx_mode(dev);
9562
Michael Chan29315e82006-06-29 20:12:30 -07009563 if (netif_running(dev))
9564 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -07009565
9566 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009567}
Linus Torvalds1da177e2005-04-16 15:20:36 -07009568#endif
9569
David S. Miller15f98502005-05-18 22:49:26 -07009570static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9571{
9572 struct tg3 *tp = netdev_priv(dev);
9573
9574 memcpy(ec, &tp->coal, sizeof(*ec));
9575 return 0;
9576}
9577
Michael Chand244c892005-07-05 14:42:33 -07009578static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9579{
9580 struct tg3 *tp = netdev_priv(dev);
9581 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9582 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9583
9584 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9585 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9586 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9587 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9588 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9589 }
9590
9591 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9592 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9593 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9594 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9595 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9596 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9597 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9598 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9599 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9600 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9601 return -EINVAL;
9602
9603 /* No rx interrupts will be generated if both are zero */
9604 if ((ec->rx_coalesce_usecs == 0) &&
9605 (ec->rx_max_coalesced_frames == 0))
9606 return -EINVAL;
9607
9608 /* No tx interrupts will be generated if both are zero */
9609 if ((ec->tx_coalesce_usecs == 0) &&
9610 (ec->tx_max_coalesced_frames == 0))
9611 return -EINVAL;
9612
9613 /* Only copy relevant parameters, ignore all others. */
9614 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9615 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9616 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9617 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9618 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9619 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9620 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9621 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9622 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9623
9624 if (netif_running(dev)) {
9625 tg3_full_lock(tp, 0);
9626 __tg3_set_coalesce(tp, &tp->coal);
9627 tg3_full_unlock(tp);
9628 }
9629 return 0;
9630}
9631
Jeff Garzik7282d492006-09-13 14:30:00 -04009632static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009633 .get_settings = tg3_get_settings,
9634 .set_settings = tg3_set_settings,
9635 .get_drvinfo = tg3_get_drvinfo,
9636 .get_regs_len = tg3_get_regs_len,
9637 .get_regs = tg3_get_regs,
9638 .get_wol = tg3_get_wol,
9639 .set_wol = tg3_set_wol,
9640 .get_msglevel = tg3_get_msglevel,
9641 .set_msglevel = tg3_set_msglevel,
9642 .nway_reset = tg3_nway_reset,
9643 .get_link = ethtool_op_get_link,
9644 .get_eeprom_len = tg3_get_eeprom_len,
9645 .get_eeprom = tg3_get_eeprom,
9646 .set_eeprom = tg3_set_eeprom,
9647 .get_ringparam = tg3_get_ringparam,
9648 .set_ringparam = tg3_set_ringparam,
9649 .get_pauseparam = tg3_get_pauseparam,
9650 .set_pauseparam = tg3_set_pauseparam,
9651 .get_rx_csum = tg3_get_rx_csum,
9652 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009653 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009654 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009655 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -07009656 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009657 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -07009658 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009659 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -07009660 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -07009661 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009662 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009663};
9664
9665static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9666{
Michael Chan1b277772006-03-20 22:27:48 -08009667 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009668
9669 tp->nvram_size = EEPROM_CHIP_SIZE;
9670
Michael Chan18201802006-03-20 22:29:15 -08009671 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009672 return;
9673
Michael Chanb16250e2006-09-27 16:10:14 -07009674 if ((magic != TG3_EEPROM_MAGIC) &&
9675 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9676 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009677 return;
9678
9679 /*
9680 * Size the chip by reading offsets at increasing powers of two.
9681 * When we encounter our validation signature, we know the addressing
9682 * has wrapped around, and thus have our chip size.
9683 */
Michael Chan1b277772006-03-20 22:27:48 -08009684 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009685
9686 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -08009687 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009688 return;
9689
Michael Chan18201802006-03-20 22:29:15 -08009690 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009691 break;
9692
9693 cursize <<= 1;
9694 }
9695
9696 tp->nvram_size = cursize;
9697}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009698
Linus Torvalds1da177e2005-04-16 15:20:36 -07009699static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9700{
9701 u32 val;
9702
Michael Chan18201802006-03-20 22:29:15 -08009703 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009704 return;
9705
9706 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -08009707 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -08009708 tg3_get_eeprom_size(tp);
9709 return;
9710 }
9711
Linus Torvalds1da177e2005-04-16 15:20:36 -07009712 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9713 if (val != 0) {
9714 tp->nvram_size = (val >> 16) * 1024;
9715 return;
9716 }
9717 }
Matt Carlson989a9d22007-05-05 11:51:05 -07009718 tp->nvram_size = 0x80000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009719}
9720
9721static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9722{
9723 u32 nvcfg1;
9724
9725 nvcfg1 = tr32(NVRAM_CFG1);
9726 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9727 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9728 }
9729 else {
9730 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9731 tw32(NVRAM_CFG1, nvcfg1);
9732 }
9733
Michael Chan4c987482005-09-05 17:52:38 -07009734 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -07009735 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009736 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9737 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9738 tp->nvram_jedecnum = JEDEC_ATMEL;
9739 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9740 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9741 break;
9742 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9743 tp->nvram_jedecnum = JEDEC_ATMEL;
9744 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9745 break;
9746 case FLASH_VENDOR_ATMEL_EEPROM:
9747 tp->nvram_jedecnum = JEDEC_ATMEL;
9748 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9749 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9750 break;
9751 case FLASH_VENDOR_ST:
9752 tp->nvram_jedecnum = JEDEC_ST;
9753 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9754 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9755 break;
9756 case FLASH_VENDOR_SAIFUN:
9757 tp->nvram_jedecnum = JEDEC_SAIFUN;
9758 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9759 break;
9760 case FLASH_VENDOR_SST_SMALL:
9761 case FLASH_VENDOR_SST_LARGE:
9762 tp->nvram_jedecnum = JEDEC_SST;
9763 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9764 break;
9765 }
9766 }
9767 else {
9768 tp->nvram_jedecnum = JEDEC_ATMEL;
9769 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9770 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9771 }
9772}
9773
Michael Chan361b4ac2005-04-21 17:11:21 -07009774static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9775{
9776 u32 nvcfg1;
9777
9778 nvcfg1 = tr32(NVRAM_CFG1);
9779
Michael Chane6af3012005-04-21 17:12:05 -07009780 /* NVRAM protection for TPM */
9781 if (nvcfg1 & (1 << 27))
9782 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9783
Michael Chan361b4ac2005-04-21 17:11:21 -07009784 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9785 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9786 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9787 tp->nvram_jedecnum = JEDEC_ATMEL;
9788 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9789 break;
9790 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9791 tp->nvram_jedecnum = JEDEC_ATMEL;
9792 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9793 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9794 break;
9795 case FLASH_5752VENDOR_ST_M45PE10:
9796 case FLASH_5752VENDOR_ST_M45PE20:
9797 case FLASH_5752VENDOR_ST_M45PE40:
9798 tp->nvram_jedecnum = JEDEC_ST;
9799 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9800 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9801 break;
9802 }
9803
9804 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9805 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9806 case FLASH_5752PAGE_SIZE_256:
9807 tp->nvram_pagesize = 256;
9808 break;
9809 case FLASH_5752PAGE_SIZE_512:
9810 tp->nvram_pagesize = 512;
9811 break;
9812 case FLASH_5752PAGE_SIZE_1K:
9813 tp->nvram_pagesize = 1024;
9814 break;
9815 case FLASH_5752PAGE_SIZE_2K:
9816 tp->nvram_pagesize = 2048;
9817 break;
9818 case FLASH_5752PAGE_SIZE_4K:
9819 tp->nvram_pagesize = 4096;
9820 break;
9821 case FLASH_5752PAGE_SIZE_264:
9822 tp->nvram_pagesize = 264;
9823 break;
9824 }
9825 }
9826 else {
9827 /* For eeprom, set pagesize to maximum eeprom size */
9828 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9829
9830 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9831 tw32(NVRAM_CFG1, nvcfg1);
9832 }
9833}
9834
Michael Chand3c7b882006-03-23 01:28:25 -08009835static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9836{
Matt Carlson989a9d22007-05-05 11:51:05 -07009837 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -08009838
9839 nvcfg1 = tr32(NVRAM_CFG1);
9840
9841 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -07009842 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -08009843 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -07009844 protect = 1;
9845 }
Michael Chand3c7b882006-03-23 01:28:25 -08009846
Matt Carlson989a9d22007-05-05 11:51:05 -07009847 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9848 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -08009849 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9850 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9851 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -07009852 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -08009853 tp->nvram_jedecnum = JEDEC_ATMEL;
9854 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9855 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9856 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -07009857 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9858 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlson989a9d22007-05-05 11:51:05 -07009859 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9860 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9861 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9862 else
9863 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
Michael Chand3c7b882006-03-23 01:28:25 -08009864 break;
9865 case FLASH_5752VENDOR_ST_M45PE10:
9866 case FLASH_5752VENDOR_ST_M45PE20:
9867 case FLASH_5752VENDOR_ST_M45PE40:
9868 tp->nvram_jedecnum = JEDEC_ST;
9869 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9870 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9871 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -07009872 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9873 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9874 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9875 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9876 else
9877 tp->nvram_size = (protect ? 0x20000 : 0x80000);
Michael Chand3c7b882006-03-23 01:28:25 -08009878 break;
9879 }
9880}
9881
Michael Chan1b277772006-03-20 22:27:48 -08009882static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9883{
9884 u32 nvcfg1;
9885
9886 nvcfg1 = tr32(NVRAM_CFG1);
9887
9888 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9889 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9890 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9891 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9892 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9893 tp->nvram_jedecnum = JEDEC_ATMEL;
9894 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9895 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9896
9897 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9898 tw32(NVRAM_CFG1, nvcfg1);
9899 break;
9900 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9901 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9902 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9903 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9904 tp->nvram_jedecnum = JEDEC_ATMEL;
9905 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9906 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9907 tp->nvram_pagesize = 264;
9908 break;
9909 case FLASH_5752VENDOR_ST_M45PE10:
9910 case FLASH_5752VENDOR_ST_M45PE20:
9911 case FLASH_5752VENDOR_ST_M45PE40:
9912 tp->nvram_jedecnum = JEDEC_ST;
9913 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9914 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9915 tp->nvram_pagesize = 256;
9916 break;
9917 }
9918}
9919
Matt Carlson6b91fa02007-10-10 18:01:09 -07009920static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9921{
9922 u32 nvcfg1, protect = 0;
9923
9924 nvcfg1 = tr32(NVRAM_CFG1);
9925
9926 /* NVRAM protection for TPM */
9927 if (nvcfg1 & (1 << 27)) {
9928 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9929 protect = 1;
9930 }
9931
9932 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9933 switch (nvcfg1) {
9934 case FLASH_5761VENDOR_ATMEL_ADB021D:
9935 case FLASH_5761VENDOR_ATMEL_ADB041D:
9936 case FLASH_5761VENDOR_ATMEL_ADB081D:
9937 case FLASH_5761VENDOR_ATMEL_ADB161D:
9938 case FLASH_5761VENDOR_ATMEL_MDB021D:
9939 case FLASH_5761VENDOR_ATMEL_MDB041D:
9940 case FLASH_5761VENDOR_ATMEL_MDB081D:
9941 case FLASH_5761VENDOR_ATMEL_MDB161D:
9942 tp->nvram_jedecnum = JEDEC_ATMEL;
9943 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9944 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9945 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9946 tp->nvram_pagesize = 256;
9947 break;
9948 case FLASH_5761VENDOR_ST_A_M45PE20:
9949 case FLASH_5761VENDOR_ST_A_M45PE40:
9950 case FLASH_5761VENDOR_ST_A_M45PE80:
9951 case FLASH_5761VENDOR_ST_A_M45PE16:
9952 case FLASH_5761VENDOR_ST_M_M45PE20:
9953 case FLASH_5761VENDOR_ST_M_M45PE40:
9954 case FLASH_5761VENDOR_ST_M_M45PE80:
9955 case FLASH_5761VENDOR_ST_M_M45PE16:
9956 tp->nvram_jedecnum = JEDEC_ST;
9957 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9958 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9959 tp->nvram_pagesize = 256;
9960 break;
9961 }
9962
9963 if (protect) {
9964 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9965 } else {
9966 switch (nvcfg1) {
9967 case FLASH_5761VENDOR_ATMEL_ADB161D:
9968 case FLASH_5761VENDOR_ATMEL_MDB161D:
9969 case FLASH_5761VENDOR_ST_A_M45PE16:
9970 case FLASH_5761VENDOR_ST_M_M45PE16:
9971 tp->nvram_size = 0x100000;
9972 break;
9973 case FLASH_5761VENDOR_ATMEL_ADB081D:
9974 case FLASH_5761VENDOR_ATMEL_MDB081D:
9975 case FLASH_5761VENDOR_ST_A_M45PE80:
9976 case FLASH_5761VENDOR_ST_M_M45PE80:
9977 tp->nvram_size = 0x80000;
9978 break;
9979 case FLASH_5761VENDOR_ATMEL_ADB041D:
9980 case FLASH_5761VENDOR_ATMEL_MDB041D:
9981 case FLASH_5761VENDOR_ST_A_M45PE40:
9982 case FLASH_5761VENDOR_ST_M_M45PE40:
9983 tp->nvram_size = 0x40000;
9984 break;
9985 case FLASH_5761VENDOR_ATMEL_ADB021D:
9986 case FLASH_5761VENDOR_ATMEL_MDB021D:
9987 case FLASH_5761VENDOR_ST_A_M45PE20:
9988 case FLASH_5761VENDOR_ST_M_M45PE20:
9989 tp->nvram_size = 0x20000;
9990 break;
9991 }
9992 }
9993}
9994
Michael Chanb5d37722006-09-27 16:06:21 -07009995static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
9996{
9997 tp->nvram_jedecnum = JEDEC_ATMEL;
9998 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9999 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10000}
10001
Linus Torvalds1da177e2005-04-16 15:20:36 -070010002/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10003static void __devinit tg3_nvram_init(struct tg3 *tp)
10004{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010005 tw32_f(GRC_EEPROM_ADDR,
10006 (EEPROM_ADDR_FSM_RESET |
10007 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10008 EEPROM_ADDR_CLKPERD_SHIFT)));
10009
Michael Chan9d57f012006-12-07 00:23:25 -080010010 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010011
10012 /* Enable seeprom accesses. */
10013 tw32_f(GRC_LOCAL_CTRL,
10014 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10015 udelay(100);
10016
10017 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10018 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10019 tp->tg3_flags |= TG3_FLAG_NVRAM;
10020
Michael Chanec41c7d2006-01-17 02:40:55 -080010021 if (tg3_nvram_lock(tp)) {
10022 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10023 "tg3_nvram_init failed.\n", tp->dev->name);
10024 return;
10025 }
Michael Chane6af3012005-04-21 17:12:05 -070010026 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010027
Matt Carlson989a9d22007-05-05 11:51:05 -070010028 tp->nvram_size = 0;
10029
Michael Chan361b4ac2005-04-21 17:11:21 -070010030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10031 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010032 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10033 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010034 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
Michael Chan1b277772006-03-20 22:27:48 -080010036 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010037 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10038 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010039 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10040 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010041 else
10042 tg3_get_nvram_info(tp);
10043
Matt Carlson989a9d22007-05-05 11:51:05 -070010044 if (tp->nvram_size == 0)
10045 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010046
Michael Chane6af3012005-04-21 17:12:05 -070010047 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010048 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010049
10050 } else {
10051 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10052
10053 tg3_get_eeprom_size(tp);
10054 }
10055}
10056
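/* Read one 32-bit word from the legacy serial EEPROM by programming
 * GRC_EEPROM_ADDR and polling EEPROM_ADDR_COMPLETE (up to roughly one
 * second before giving up).
 */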
10057static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10058 u32 offset, u32 *val)
10059{
10060 u32 tmp;
10061 int i;
10062
10063 if (offset > EEPROM_ADDR_ADDR_MASK ||
10064 (offset % 4) != 0)
10065 return -EINVAL;
10066
10067 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10068 EEPROM_ADDR_DEVID_MASK |
10069 EEPROM_ADDR_READ);
10070 tw32(GRC_EEPROM_ADDR,
10071 tmp |
10072 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10073 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10074 EEPROM_ADDR_ADDR_MASK) |
10075 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10076
Michael Chan9d57f012006-12-07 00:23:25 -080010077 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010078 tmp = tr32(GRC_EEPROM_ADDR);
10079
10080 if (tmp & EEPROM_ADDR_COMPLETE)
10081 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010082 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010083 }
10084 if (!(tmp & EEPROM_ADDR_COMPLETE))
10085 return -EBUSY;
10086
10087 *val = tr32(GRC_EEPROM_DATA);
10088 return 0;
10089}
10090
10091#define NVRAM_CMD_TIMEOUT 10000
10092
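/* Issue a command to the NVRAM interface and poll for NVRAM_CMD_DONE,
 * giving up after NVRAM_CMD_TIMEOUT polls of 10us each (about 100ms).
 */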
10093static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10094{
10095 int i;
10096
10097 tw32(NVRAM_CMD, nvram_cmd);
10098 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10099 udelay(10);
10100 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10101 udelay(10);
10102 break;
10103 }
10104 }
10105 if (i == NVRAM_CMD_TIMEOUT) {
10106 return -EBUSY;
10107 }
10108 return 0;
10109}
10110
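/* Translate a linear byte offset into the page/byte addressing used by
 * Atmel AT45DB buffered flash parts: the page index is shifted into the
 * high bits and the byte-within-page stays in the low bits, so an offset
 * of exactly one page maps to (1 << ATMEL_AT45DB0X1B_PAGE_POS).  Parts
 * flagged with TG3_FLG3_NO_NVRAM_ADDR_TRANS are left untranslated.
 */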
Michael Chan18201802006-03-20 22:29:15 -080010111static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10112{
10113 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10114 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10115 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010116 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010117 (tp->nvram_jedecnum == JEDEC_ATMEL))
10118
10119 addr = ((addr / tp->nvram_pagesize) <<
10120 ATMEL_AT45DB0X1B_PAGE_POS) +
10121 (addr % tp->nvram_pagesize);
10122
10123 return addr;
10124}
10125
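/* Inverse of tg3_nvram_phys_addr(): convert a page/byte address back
 * into a linear byte offset.
 */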
Michael Chanc4e65752006-03-20 22:29:32 -080010126static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10127{
10128 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10129 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10130 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010131 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010132 (tp->nvram_jedecnum == JEDEC_ATMEL))
10133
10134 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10135 tp->nvram_pagesize) +
10136 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10137
10138 return addr;
10139}
10140
Linus Torvalds1da177e2005-04-16 15:20:36 -070010141static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10142{
10143 int ret;
10144
Linus Torvalds1da177e2005-04-16 15:20:36 -070010145 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10146 return tg3_nvram_read_using_eeprom(tp, offset, val);
10147
Michael Chan18201802006-03-20 22:29:15 -080010148 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010149
10150 if (offset > NVRAM_ADDR_MSK)
10151 return -EINVAL;
10152
Michael Chanec41c7d2006-01-17 02:40:55 -080010153 ret = tg3_nvram_lock(tp);
10154 if (ret)
10155 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010156
Michael Chane6af3012005-04-21 17:12:05 -070010157 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010158
10159 tw32(NVRAM_ADDR, offset);
10160 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10161 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10162
10163 if (ret == 0)
10164 *val = swab32(tr32(NVRAM_RDDATA));
10165
Michael Chane6af3012005-04-21 17:12:05 -070010166 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010167
Michael Chan381291b2005-12-13 21:08:21 -080010168 tg3_nvram_unlock(tp);
10169
Linus Torvalds1da177e2005-04-16 15:20:36 -070010170 return ret;
10171}
10172
Al Virob9fc7dc2007-12-17 22:59:57 -080010173static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10174{
10175 u32 v;
10176 int res = tg3_nvram_read(tp, offset, &v);
10177 if (!res)
10178 *val = cpu_to_le32(v);
10179 return res;
10180}
10181
Michael Chan18201802006-03-20 22:29:15 -080010182static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10183{
10184 int err;
10185 u32 tmp;
10186
10187 err = tg3_nvram_read(tp, offset, &tmp);
10188 *val = swab32(tmp);
10189 return err;
10190}
10191
Linus Torvalds1da177e2005-04-16 15:20:36 -070010192static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10193 u32 offset, u32 len, u8 *buf)
10194{
10195 int i, j, rc = 0;
10196 u32 val;
10197
10198 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010199 u32 addr;
10200 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010201
10202 addr = offset + i;
10203
10204 memcpy(&data, buf + i, 4);
10205
Al Virob9fc7dc2007-12-17 22:59:57 -080010206 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010207
10208 val = tr32(GRC_EEPROM_ADDR);
10209 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10210
10211 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10212 EEPROM_ADDR_READ);
10213 tw32(GRC_EEPROM_ADDR, val |
10214 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10215 (addr & EEPROM_ADDR_ADDR_MASK) |
10216 EEPROM_ADDR_START |
10217 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010218
Michael Chan9d57f012006-12-07 00:23:25 -080010219 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010220 val = tr32(GRC_EEPROM_ADDR);
10221
10222 if (val & EEPROM_ADDR_COMPLETE)
10223 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010224 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010225 }
10226 if (!(val & EEPROM_ADDR_COMPLETE)) {
10227 rc = -EBUSY;
10228 break;
10229 }
10230 }
10231
10232 return rc;
10233}
10234
10235/* offset and length are dword aligned */
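/* Unbuffered flash must be rewritten a full page at a time: read the
 * current page into a scratch buffer, merge in the new data, issue a
 * write enable, erase the page, then program it back one word at a time
 * with FIRST/LAST framing.
 */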
10236static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10237 u8 *buf)
10238{
10239 int ret = 0;
10240 u32 pagesize = tp->nvram_pagesize;
10241 u32 pagemask = pagesize - 1;
10242 u32 nvram_cmd;
10243 u8 *tmp;
10244
10245 tmp = kmalloc(pagesize, GFP_KERNEL);
10246 if (tmp == NULL)
10247 return -ENOMEM;
10248
10249 while (len) {
10250 int j;
Michael Chane6af3012005-04-21 17:12:05 -070010251 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010252
10253 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010254
Linus Torvalds1da177e2005-04-16 15:20:36 -070010255 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080010256 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080010257 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010258 break;
10259 }
10260 if (ret)
10261 break;
10262
10263 page_off = offset & pagemask;
10264 size = pagesize;
10265 if (len < size)
10266 size = len;
10267
10268 len -= size;
10269
10270 memcpy(tmp + page_off, buf, size);
10271
10272 offset = offset + (pagesize - page_off);
10273
Michael Chane6af3012005-04-21 17:12:05 -070010274 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010275
10276 /*
10277 * Before we can erase the flash page, we need
10278 * to issue a special "write enable" command.
10279 */
10280 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10281
10282 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10283 break;
10284
10285 /* Erase the target page */
10286 tw32(NVRAM_ADDR, phy_addr);
10287
10288 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10289 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10290
10291 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10292 break;
10293
10294 /* Issue another write enable to start the write. */
10295 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10296
10297 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10298 break;
10299
10300 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010301 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010302
Al Virob9fc7dc2007-12-17 22:59:57 -080010303 data = *((__be32 *) (tmp + j));
10304 /* swab32(le32_to_cpu(data)), actually */
10305 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010306
10307 tw32(NVRAM_ADDR, phy_addr + j);
10308
10309 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10310 NVRAM_CMD_WR;
10311
10312 if (j == 0)
10313 nvram_cmd |= NVRAM_CMD_FIRST;
10314 else if (j == (pagesize - 4))
10315 nvram_cmd |= NVRAM_CMD_LAST;
10316
10317 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10318 break;
10319 }
10320 if (ret)
10321 break;
10322 }
10323
10324 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10325 tg3_nvram_exec_cmd(tp, nvram_cmd);
10326
10327 kfree(tmp);
10328
10329 return ret;
10330}
10331
10332/* offset and length are dword aligned */
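/* Buffered flash and EEPROM parts are programmed in place one word at a
 * time; ST parts may also need an explicit write-enable command before
 * the first word of each page.
 */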
10333static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10334 u8 *buf)
10335{
10336 int i, ret = 0;
10337
10338 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010339 u32 page_off, phy_addr, nvram_cmd;
10340 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010341
10342 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080010343 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010344
10345 page_off = offset % tp->nvram_pagesize;
10346
Michael Chan18201802006-03-20 22:29:15 -080010347 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010348
10349 tw32(NVRAM_ADDR, phy_addr);
10350
10351 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10352
10353 if ((page_off == 0) || (i == 0))
10354 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070010355 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010356 nvram_cmd |= NVRAM_CMD_LAST;
10357
10358 if (i == (len - 4))
10359 nvram_cmd |= NVRAM_CMD_LAST;
10360
Michael Chan4c987482005-09-05 17:52:38 -070010361 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080010362 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080010363 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070010364 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070010365 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Michael Chan4c987482005-09-05 17:52:38 -070010366 (tp->nvram_jedecnum == JEDEC_ST) &&
10367 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010368
10369 if ((ret = tg3_nvram_exec_cmd(tp,
10370 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10371 NVRAM_CMD_DONE)))
10372
10373 break;
10374 }
10375 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10376 /* We always do complete word writes to eeprom. */
10377 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10378 }
10379
10380 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10381 break;
10382 }
10383 return ret;
10384}
10385
10386/* offset and length are dword aligned */
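/* Top-level NVRAM write: temporarily lift the GPIO-based write protect,
 * enable NVRAM writes in GRC_MODE, and dispatch to the EEPROM, buffered,
 * or unbuffered write path as appropriate.
 */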
10387static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10388{
10389 int ret;
10390
Linus Torvalds1da177e2005-04-16 15:20:36 -070010391 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070010392 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10393 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010394 udelay(40);
10395 }
10396
10397 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10398 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10399 }
10400 else {
10401 u32 grc_mode;
10402
Michael Chanec41c7d2006-01-17 02:40:55 -080010403 ret = tg3_nvram_lock(tp);
10404 if (ret)
10405 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010406
Michael Chane6af3012005-04-21 17:12:05 -070010407 tg3_enable_nvram_access(tp);
10408 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10409 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010410 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010411
10412 grc_mode = tr32(GRC_MODE);
10413 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10414
10415 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10416 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10417
10418 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10419 buf);
10420 }
10421 else {
10422 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10423 buf);
10424 }
10425
10426 grc_mode = tr32(GRC_MODE);
10427 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10428
Michael Chane6af3012005-04-21 17:12:05 -070010429 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010430 tg3_nvram_unlock(tp);
10431 }
10432
10433 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070010434 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010435 udelay(40);
10436 }
10437
10438 return ret;
10439}
10440
10441struct subsys_tbl_ent {
10442 u16 subsys_vendor, subsys_devid;
10443 u32 phy_id;
10444};
10445
10446static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10447 /* Broadcom boards. */
10448 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10449 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10450 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10451 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10452 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10453 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10454 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10455 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10456 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10457 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10458 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10459
10460 /* 3com boards. */
10461 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10462 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10463 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10464 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10465 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10466
10467 /* DELL boards. */
10468 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10469 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10470 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10471 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10472
10473 /* Compaq boards. */
10474 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10475 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10476 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10477 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10478 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10479
10480 /* IBM boards. */
10481 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10482};
10483
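/* Match the PCI subsystem vendor/device IDs against the table above to
 * obtain a default PHY ID when no usable NVRAM signature is present.
 */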
10484static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10485{
10486 int i;
10487
10488 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10489 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10490 tp->pdev->subsystem_vendor) &&
10491 (subsys_id_to_phy_id[i].subsys_devid ==
10492 tp->pdev->subsystem_device))
10493 return &subsys_id_to_phy_id[i];
10494 }
10495 return NULL;
10496}
10497
Michael Chan7d0c41e2005-04-21 17:06:20 -070010498static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010499{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010500 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080010501 u16 pmcsr;
10502
10503 /* On some early chips the SRAM cannot be accessed in D3hot state,
10504	 * so we need to make sure we're in D0.
10505 */
10506 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10507 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10508 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10509 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070010510
10511 /* Make sure register accesses (indirect or otherwise)
10512 * will function correctly.
10513 */
10514 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10515 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010516
David S. Millerf49639e2006-06-09 11:58:36 -070010517 /* The memory arbiter has to be enabled in order for SRAM accesses
10518 * to succeed. Normally on powerup the tg3 chip firmware will make
10519 * sure it is enabled, but other entities such as system netboot
10520 * code might disable it.
10521 */
10522 val = tr32(MEMARB_MODE);
10523 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10524
Linus Torvalds1da177e2005-04-16 15:20:36 -070010525 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070010526 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10527
Gary Zambranoa85feb82007-05-05 11:52:19 -070010528 /* Assume an onboard device and WOL capable by default. */
10529 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080010530
Michael Chanb5d37722006-09-27 16:06:21 -070010531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080010532 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070010533 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080010534 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10535 }
Matt Carlson0527ba32007-10-10 18:03:30 -070010536 val = tr32(VCPU_CFGSHDW);
10537 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070010538 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070010539 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10540 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10541 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Michael Chanb5d37722006-09-27 16:06:21 -070010542 return;
10543 }
10544
Linus Torvalds1da177e2005-04-16 15:20:36 -070010545 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10546 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10547 u32 nic_cfg, led_cfg;
Michael Chan7d0c41e2005-04-21 17:06:20 -070010548 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10549 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010550
10551 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10552 tp->nic_sram_data_cfg = nic_cfg;
10553
10554 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10555 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10556 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10557 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10558 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10559 (ver > 0) && (ver < 0x100))
10560 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10561
Linus Torvalds1da177e2005-04-16 15:20:36 -070010562 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10563 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10564 eeprom_phy_serdes = 1;
10565
10566 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10567 if (nic_phy_id != 0) {
10568 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10569 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10570
10571 eeprom_phy_id = (id1 >> 16) << 10;
10572 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10573 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10574 } else
10575 eeprom_phy_id = 0;
10576
Michael Chan7d0c41e2005-04-21 17:06:20 -070010577 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070010578 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070010579 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070010580 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10581 else
10582 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10583 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070010584
John W. Linvillecbf46852005-04-21 17:01:29 -070010585 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010586 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10587 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070010588 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070010589 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10590
10591 switch (led_cfg) {
10592 default:
10593 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10594 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10595 break;
10596
10597 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10598 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10599 break;
10600
10601 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10602 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070010603
10604 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10605 * read on some older 5700/5701 bootcode.
10606 */
10607 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10608 ASIC_REV_5700 ||
10609 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10610 ASIC_REV_5701)
10611 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10612
Linus Torvalds1da177e2005-04-16 15:20:36 -070010613 break;
10614
10615 case SHASTA_EXT_LED_SHARED:
10616 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10617 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10618 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10619 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10620 LED_CTRL_MODE_PHY_2);
10621 break;
10622
10623 case SHASTA_EXT_LED_MAC:
10624 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10625 break;
10626
10627 case SHASTA_EXT_LED_COMBO:
10628 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10629 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10630 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10631 LED_CTRL_MODE_PHY_2);
10632 break;
10633
10634		}
10635
10636 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10638 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10639 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10640
Matt Carlsonb5af7122007-11-12 21:22:02 -080010641 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
10642 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
Matt Carlson5f608912007-11-12 21:17:07 -080010643 tp->led_ctrl = LED_CTRL_MODE_MAC;
10644
Michael Chan9d26e212006-12-07 00:21:14 -080010645 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010646 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080010647 if ((tp->pdev->subsystem_vendor ==
10648 PCI_VENDOR_ID_ARIMA) &&
10649 (tp->pdev->subsystem_device == 0x205a ||
10650 tp->pdev->subsystem_device == 0x2063))
10651 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10652 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070010653 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080010654 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10655 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010656
10657 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10658 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070010659 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010660 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10661 }
Matt Carlson0d3031d2007-10-10 18:02:43 -070010662 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10663 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Gary Zambranoa85feb82007-05-05 11:52:19 -070010664 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10665 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10666 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010667
Matt Carlson0527ba32007-10-10 18:03:30 -070010668 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10669 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10670 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10671
Linus Torvalds1da177e2005-04-16 15:20:36 -070010672 if (cfg2 & (1 << 17))
10673 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10674
10675 /* serdes signal pre-emphasis in register 0x590 set by */
10676 /* bootcode if bit 18 is set */
10677 if (cfg2 & (1 << 18))
10678 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070010679
10680 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10681 u32 cfg3;
10682
10683 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10684 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10685 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10686 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010687 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070010688}
10689
10690static int __devinit tg3_phy_probe(struct tg3 *tp)
10691{
10692 u32 hw_phy_id_1, hw_phy_id_2;
10693 u32 hw_phy_id, hw_phy_id_masked;
10694 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010695
10696 /* Reading the PHY ID register can conflict with ASF
10697 * firwmare access to the PHY hardware.
10698	 * firmware access to the PHY hardware.
10699 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070010700 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10701 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010702 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10703 } else {
10704 /* Now read the physical PHY_ID from the chip and verify
10705 * that it is sane. If it doesn't look good, we fall back
10706		 * to either the hard-coded table based PHY_ID or, failing
10707		 * that, the value found in the eeprom area.
10708 */
10709 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10710 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10711
10712 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
10713 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10714 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
10715
10716 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10717 }
10718
10719 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10720 tp->phy_id = hw_phy_id;
10721 if (hw_phy_id_masked == PHY_ID_BCM8002)
10722 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070010723 else
10724 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010725 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070010726 if (tp->phy_id != PHY_ID_INVALID) {
10727 /* Do nothing, phy ID already set up in
10728 * tg3_get_eeprom_hw_cfg().
10729 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070010730 } else {
10731 struct subsys_tbl_ent *p;
10732
10733 /* No eeprom signature? Try the hardcoded
10734 * subsys device table.
10735 */
10736 p = lookup_by_subsys(tp);
10737 if (!p)
10738 return -ENODEV;
10739
10740 tp->phy_id = p->phy_id;
10741 if (!tp->phy_id ||
10742 tp->phy_id == PHY_ID_BCM8002)
10743 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10744 }
10745 }
10746
Michael Chan747e8f82005-07-25 12:33:22 -070010747 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070010748 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070010749 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080010750 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010751
10752 tg3_readphy(tp, MII_BMSR, &bmsr);
10753 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10754 (bmsr & BMSR_LSTATUS))
10755 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010756
Linus Torvalds1da177e2005-04-16 15:20:36 -070010757 err = tg3_phy_reset(tp);
10758 if (err)
10759 return err;
10760
10761 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10762 ADVERTISE_100HALF | ADVERTISE_100FULL |
10763 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10764 tg3_ctrl = 0;
10765 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10766 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10767 MII_TG3_CTRL_ADV_1000_FULL);
10768 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10769 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10770 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10771 MII_TG3_CTRL_ENABLE_AS_MASTER);
10772 }
10773
Michael Chan3600d912006-12-07 00:21:48 -080010774 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10775 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10776 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10777 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010778 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10779
10780 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10781 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10782
10783 tg3_writephy(tp, MII_BMCR,
10784 BMCR_ANENABLE | BMCR_ANRESTART);
10785 }
10786 tg3_phy_set_wirespeed(tp);
10787
10788 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10789 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10790 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10791 }
10792
10793skip_phy_reset:
10794 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10795 err = tg3_init_5401phy_dsp(tp);
10796 if (err)
10797 return err;
10798 }
10799
10800 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10801 err = tg3_init_5401phy_dsp(tp);
10802 }
10803
Michael Chan747e8f82005-07-25 12:33:22 -070010804 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010805 tp->link_config.advertising =
10806 (ADVERTISED_1000baseT_Half |
10807 ADVERTISED_1000baseT_Full |
10808 ADVERTISED_Autoneg |
10809 ADVERTISED_FIBRE);
10810 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10811 tp->link_config.advertising &=
10812 ~(ADVERTISED_1000baseT_Half |
10813 ADVERTISED_1000baseT_Full);
10814
10815 return err;
10816}
10817
10818static void __devinit tg3_read_partno(struct tg3 *tp)
10819{
10820 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080010821 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080010822 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010823
Michael Chan18201802006-03-20 22:29:15 -080010824 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070010825 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010826
Michael Chan18201802006-03-20 22:29:15 -080010827 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080010828 for (i = 0; i < 256; i += 4) {
10829 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010830
Michael Chan1b277772006-03-20 22:27:48 -080010831 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10832 goto out_not_found;
10833
10834 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
10835 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
10836 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10837 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10838 }
10839 } else {
10840 int vpd_cap;
10841
10842 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10843 for (i = 0; i < 256; i += 4) {
10844 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080010845 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080010846 u16 tmp16;
10847
10848 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10849 i);
10850 while (j++ < 100) {
10851 pci_read_config_word(tp->pdev, vpd_cap +
10852 PCI_VPD_ADDR, &tmp16);
10853 if (tmp16 & 0x8000)
10854 break;
10855 msleep(1);
10856 }
David S. Millerf49639e2006-06-09 11:58:36 -070010857 if (!(tmp16 & 0x8000))
10858 goto out_not_found;
10859
Michael Chan1b277772006-03-20 22:27:48 -080010860 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10861 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080010862 v = cpu_to_le32(tmp);
10863 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080010864 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010865 }
10866
10867 /* Now parse and find the part number. */
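	/* Skip the 0x82 identifier-string and 0x91 read-write descriptors,
	 * then scan the 0x90 read-only block for the "PN" keyword.
	 */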
Michael Chanaf2c6a42006-11-07 14:57:51 -080010868 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010869 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080010870 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010871
10872 if (val == 0x82 || val == 0x91) {
10873 i = (i + 3 +
10874 (vpd_data[i + 1] +
10875 (vpd_data[i + 2] << 8)));
10876 continue;
10877 }
10878
10879 if (val != 0x90)
10880 goto out_not_found;
10881
10882 block_end = (i + 3 +
10883 (vpd_data[i + 1] +
10884 (vpd_data[i + 2] << 8)));
10885 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080010886
10887 if (block_end > 256)
10888 goto out_not_found;
10889
10890 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010891 if (vpd_data[i + 0] == 'P' &&
10892 vpd_data[i + 1] == 'N') {
10893 int partno_len = vpd_data[i + 2];
10894
Michael Chanaf2c6a42006-11-07 14:57:51 -080010895 i += 3;
10896 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010897 goto out_not_found;
10898
10899 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080010900 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010901
10902 /* Success. */
10903 return;
10904 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080010905 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070010906 }
10907
10908 /* Part number not found. */
10909 goto out_not_found;
10910 }
10911
10912out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070010913 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10914 strcpy(tp->board_part_number, "BCM95906");
10915 else
10916 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070010917}
10918
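/* A firmware image in NVRAM is considered valid if the first word of its
 * header carries the 0x0c000000 signature in its upper bits and the
 * second word is zero.
 */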
Matt Carlson9c8a6202007-10-21 16:16:08 -070010919static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10920{
10921 u32 val;
10922
10923 if (tg3_nvram_read_swab(tp, offset, &val) ||
10924 (val & 0xfc000000) != 0x0c000000 ||
10925 tg3_nvram_read_swab(tp, offset + 4, &val) ||
10926 val != 0)
10927 return 0;
10928
10929 return 1;
10930}
10931
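/* Build tp->fw_ver: copy the bootcode version string referenced by the
 * NVRAM header and, when ASF is enabled (and the APE is not), append the
 * version of the ASF initialization entry found in the NVM directory.
 */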
Michael Chanc4e65752006-03-20 22:29:32 -080010932static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10933{
10934 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070010935 u32 ver_offset;
10936 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080010937
10938 if (tg3_nvram_read_swab(tp, 0, &val))
10939 return;
10940
10941 if (val != TG3_EEPROM_MAGIC)
10942 return;
10943
10944 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10945 tg3_nvram_read_swab(tp, 0x4, &start))
10946 return;
10947
10948 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070010949
10950 if (!tg3_fw_img_is_valid(tp, offset) ||
10951 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080010952 return;
10953
Matt Carlson9c8a6202007-10-21 16:16:08 -070010954 offset = offset + ver_offset - start;
10955 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010956 __le32 v;
10957 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080010958 return;
10959
Al Virob9fc7dc2007-12-17 22:59:57 -080010960 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080010961 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070010962
10963 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080010964 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070010965 return;
10966
10967 for (offset = TG3_NVM_DIR_START;
10968 offset < TG3_NVM_DIR_END;
10969 offset += TG3_NVM_DIRENT_SIZE) {
10970 if (tg3_nvram_read_swab(tp, offset, &val))
10971 return;
10972
10973 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10974 break;
10975 }
10976
10977 if (offset == TG3_NVM_DIR_END)
10978 return;
10979
10980 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10981 start = 0x08000000;
10982 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10983 return;
10984
10985 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
10986 !tg3_fw_img_is_valid(tp, offset) ||
10987 tg3_nvram_read_swab(tp, offset + 8, &val))
10988 return;
10989
10990 offset += val - start;
10991
10992 bcnt = strlen(tp->fw_ver);
10993
10994 tp->fw_ver[bcnt++] = ',';
10995 tp->fw_ver[bcnt++] = ' ';
10996
10997 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010998 __le32 v;
10999 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011000 return;
11001
Al Virob9fc7dc2007-12-17 22:59:57 -080011002 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011003
Al Virob9fc7dc2007-12-17 22:59:57 -080011004 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11005 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011006 break;
11007 }
11008
Al Virob9fc7dc2007-12-17 22:59:57 -080011009 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11010 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011011 }
11012
11013 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011014}
11015
Michael Chan7544b092007-05-05 13:08:32 -070011016static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11017
Linus Torvalds1da177e2005-04-16 15:20:36 -070011018static int __devinit tg3_get_invariants(struct tg3 *tp)
11019{
11020 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011021 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11022 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011023 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11024 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011025 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11026 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011027 { },
11028 };
11029 u32 misc_ctrl_reg;
11030 u32 cacheline_sz_reg;
11031 u32 pci_state_reg, grc_misc_cfg;
11032 u32 val;
11033 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011034 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011035
Linus Torvalds1da177e2005-04-16 15:20:36 -070011036 /* Force memory write invalidate off. If we leave it on,
11037 * then on 5700_BX chips we have to enable a workaround.
11038 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11039	 * to match the cacheline size. The Broadcom driver has this
11040	 * workaround but turns MWI off all the time and so never uses
11041 * it. This seems to suggest that the workaround is insufficient.
11042 */
11043 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11044 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11045 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11046
11047 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11048 * has the register indirect write enable bit set before
11049 * we try to access any of the MMIO registers. It is also
11050 * critical that the PCI-X hw workaround situation is decided
11051 * before that as well.
11052 */
11053 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11054 &misc_ctrl_reg);
11055
11056 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11057 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11059 u32 prod_id_asic_rev;
11060
11061 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11062 &prod_id_asic_rev);
11063 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11064 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011065
Michael Chanff645be2005-04-21 17:09:53 -070011066 /* Wrong chip ID in 5752 A0. This code can be removed later
11067 * as A0 is not in production.
11068 */
11069 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11070 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11071
Michael Chan68929142005-08-09 20:17:14 -070011072 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11073 * we need to disable memory and use config. cycles
11074 * only to access all registers. The 5702/03 chips
11075 * can mistakenly decode the special cycles from the
11076 * ICH chipsets as memory write cycles, causing corruption
11077 * of register and memory space. Only certain ICH bridges
11078 * will drive special cycles with non-zero data during the
11079 * address phase which can fall within the 5703's address
11080 * range. This is not an ICH bug as the PCI spec allows
11081 * non-zero address during special cycles. However, only
11082 * these ICH bridges are known to drive non-zero addresses
11083 * during special cycles.
11084 *
11085 * Since special cycles do not cross PCI bridges, we only
11086 * enable this workaround if the 5703 is on the secondary
11087 * bus of these ICH bridges.
11088 */
11089 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11090 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11091 static struct tg3_dev_id {
11092 u32 vendor;
11093 u32 device;
11094 u32 rev;
11095 } ich_chipsets[] = {
11096 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11097 PCI_ANY_ID },
11098 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11099 PCI_ANY_ID },
11100 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11101 0xa },
11102 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11103 PCI_ANY_ID },
11104 { },
11105 };
11106 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11107 struct pci_dev *bridge = NULL;
11108
11109 while (pci_id->vendor != 0) {
11110 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11111 bridge);
11112 if (!bridge) {
11113 pci_id++;
11114 continue;
11115 }
11116 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070011117 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070011118 continue;
11119 }
11120 if (bridge->subordinate &&
11121 (bridge->subordinate->number ==
11122 tp->pdev->bus->number)) {
11123
11124 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11125 pci_dev_put(bridge);
11126 break;
11127 }
11128 }
11129 }
11130
Michael Chan4a29cc22006-03-19 13:21:12 -080011131 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11132	 * DMA addresses > 40-bit. This bridge may have additional
11133 * 57xx devices behind it in some 4-port NIC designs for example.
11134 * Any tg3 device found behind the bridge will also need the 40-bit
11135 * DMA workaround.
11136 */
Michael Chana4e2b342005-10-26 15:46:52 -070011137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11139 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080011140 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070011141 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070011142 }
Michael Chan4a29cc22006-03-19 13:21:12 -080011143 else {
11144 struct pci_dev *bridge = NULL;
11145
11146 do {
11147 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11148 PCI_DEVICE_ID_SERVERWORKS_EPB,
11149 bridge);
11150 if (bridge && bridge->subordinate &&
11151 (bridge->subordinate->number <=
11152 tp->pdev->bus->number) &&
11153 (bridge->subordinate->subordinate >=
11154 tp->pdev->bus->number)) {
11155 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11156 pci_dev_put(bridge);
11157 break;
11158 }
11159 } while (bridge);
11160 }
Michael Chan4cf78e42005-07-25 12:29:19 -070011161
Linus Torvalds1da177e2005-04-16 15:20:36 -070011162 /* Initialize misc host control in PCI block. */
11163 tp->misc_host_ctrl |= (misc_ctrl_reg &
11164 MISC_HOST_CTRL_CHIPREV);
11165 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11166 tp->misc_host_ctrl);
11167
11168 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11169 &cacheline_sz_reg);
11170
11171 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11172 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11173 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11174 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11175
Michael Chan7544b092007-05-05 13:08:32 -070011176 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11177 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11178 tp->pdev_peer = tg3_find_peer(tp);
11179
John W. Linville2052da92005-04-21 16:56:08 -070011180 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070011181 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080011182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad12006-03-20 22:27:35 -080011183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011186 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070011187 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070011188 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11189
John W. Linville1b440c562005-04-21 17:03:18 -070011190 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11191 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11192 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11193
Michael Chan5a6f3072006-03-20 22:28:05 -080011194 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070011195 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11196 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11197 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11198 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11199 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11200 tp->pdev_peer == tp->pdev))
11201 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11202
Michael Chanaf36e6b2006-03-23 01:28:06 -080011203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011205 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011206 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011207 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080011208 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080011209 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070011210 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080011211 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070011212 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11213 ASIC_REV_5750 &&
11214 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080011215 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070011216 }
Michael Chan5a6f3072006-03-20 22:28:05 -080011217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011218
Michael Chan0f893dc2005-07-25 12:30:38 -070011219 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11220 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
Michael Chand9ab5ad12006-03-20 22:27:35 -080011221 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011222 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
Michael Chanb5d37722006-09-27 16:06:21 -070011223 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011224 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011225 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
Michael Chanb5d37722006-09-27 16:06:21 -070011226 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Michael Chan0f893dc2005-07-25 12:30:38 -070011227 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11228
Michael Chanc7835a72006-11-15 21:14:42 -080011229 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11230 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011231 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080011232
11233 pcie_set_readrq(tp->pdev, 4096);
11234
Michael Chanc7835a72006-11-15 21:14:42 -080011235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11236 u16 lnkctl;
11237
11238 pci_read_config_word(tp->pdev,
11239 pcie_cap + PCI_EXP_LNKCTL,
11240 &lnkctl);
11241 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11242 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11243 }
11244 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011245
Michael Chan399de502005-10-03 14:02:39 -070011246 /* If we have an AMD 762 or VIA K8T800 chipset, write
11247 * reordering to the mailbox registers done by the host
11248 * controller can cause major troubles. We read back from
11249 * every mailbox register write to force the writes to be
11250 * posted to the chip in order.
11251 */
11252 if (pci_dev_present(write_reorder_chipsets) &&
11253 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11254 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11255
Linus Torvalds1da177e2005-04-16 15:20:36 -070011256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11257 tp->pci_lat_timer < 64) {
11258 tp->pci_lat_timer = 64;
11259
11260 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11261 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11262 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11263 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11264
11265 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11266 cacheline_sz_reg);
11267 }
11268
Matt Carlson9974a352007-10-07 23:27:28 -070011269 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11270 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11271 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11272 if (!tp->pcix_cap) {
11273 printk(KERN_ERR PFX "Cannot find PCI-X "
11274 "capability, aborting.\n");
11275 return -EIO;
11276 }
11277 }
11278
Linus Torvalds1da177e2005-04-16 15:20:36 -070011279 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11280 &pci_state_reg);
11281
Matt Carlson9974a352007-10-07 23:27:28 -070011282 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011283 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11284
11285 /* If this is a 5700 BX chipset, and we are in PCI-X
11286 * mode, enable register write workaround.
11287 *
11288 * The workaround is to use indirect register accesses
11289 * for all chip writes not to mailbox registers.
11290 */
11291 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11292 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011293
11294 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11295
11296			/* The chip can have its power management PCI config
11297 * space registers clobbered due to this bug.
11298 * So explicitly force the chip into D0 here.
11299 */
Matt Carlson9974a352007-10-07 23:27:28 -070011300 pci_read_config_dword(tp->pdev,
11301 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070011302 &pm_reg);
11303 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11304 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070011305 pci_write_config_dword(tp->pdev,
11306 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070011307 pm_reg);
11308
11309 /* Also, force SERR#/PERR# in PCI command. */
11310 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11311 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11312 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11313 }
11314 }
11315
Michael Chan087fe252005-08-09 20:17:41 -070011316 /* 5700 BX chips need to have their TX producer index mailboxes
11317 * written twice to workaround a bug.
11318 */
11319 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11320 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11321
Linus Torvalds1da177e2005-04-16 15:20:36 -070011322 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11323 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11324 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11325 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11326
11327 /* Chip-specific fixup from Broadcom driver */
11328 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11329 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11330 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11331 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11332 }
11333
Michael Chan1ee582d2005-08-09 20:16:46 -070011334 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070011335 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070011336 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070011337 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070011338 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070011339 tp->write32_tx_mbox = tg3_write32;
11340 tp->write32_rx_mbox = tg3_write32;
11341
11342 /* Various workaround register access methods */
11343 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11344 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070011345 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11346 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11347 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11348 /*
11349 * Back to back register writes can cause problems on these
11350 * chips, the workaround is to read back all reg writes
11351 * except those to mailbox regs.
11352 *
11353 * See tg3_write_indirect_reg32().
11354 */
Michael Chan1ee582d2005-08-09 20:16:46 -070011355 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070011356 }
11357
Michael Chan1ee582d2005-08-09 20:16:46 -070011358
11359 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11360 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11361 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11362 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11363 tp->write32_rx_mbox = tg3_write_flush_reg32;
11364 }
Michael Chan20094932005-08-09 20:16:32 -070011365
Michael Chan68929142005-08-09 20:17:14 -070011366 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11367 tp->read32 = tg3_read_indirect_reg32;
11368 tp->write32 = tg3_write_indirect_reg32;
11369 tp->read32_mbox = tg3_read_indirect_mbox;
11370 tp->write32_mbox = tg3_write_indirect_mbox;
11371 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11372 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11373
11374 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070011375 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070011376
11377 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11378 pci_cmd &= ~PCI_COMMAND_MEMORY;
11379 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11380 }
Michael Chanb5d37722006-09-27 16:06:21 -070011381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11382 tp->read32_mbox = tg3_read32_mbox_5906;
11383 tp->write32_mbox = tg3_write32_mbox_5906;
11384 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11385 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11386 }
Michael Chan68929142005-08-09 20:17:14 -070011387
Michael Chanbbadf502006-04-06 21:46:34 -070011388 if (tp->write32 == tg3_write_indirect_reg32 ||
11389 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11390 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070011391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070011392 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11393
Michael Chan7d0c41e2005-04-21 17:06:20 -070011394 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080011395 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070011396 * determined before calling tg3_set_power_state() so that
11397 * we know whether or not to switch out of Vaux power.
11398 * When the flag is set, it means that GPIO1 is used for eeprom
11399 * write protect and also implies that it is a LOM where GPIOs
11400 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011401 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070011402 tg3_get_eeprom_hw_cfg(tp);
11403
Matt Carlson0d3031d2007-10-10 18:02:43 -070011404 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11405 /* Allow reads and writes to the
11406 * APE register and memory space.
11407 */
11408 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11409 PCISTATE_ALLOW_APE_SHMEM_WR;
11410 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11411 pci_state_reg);
11412 }
11413
Matt Carlson9936bcf2007-10-10 18:03:07 -070011414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlsonb5af7122007-11-12 21:22:02 -080011415 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -070011416 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11417
Matt Carlsonb5af7122007-11-12 21:22:02 -080011418 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11419 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11420 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11421 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11422 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11423 }
11424
Michael Chan314fba32005-04-21 17:07:04 -070011425 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11426 * GPIO1 driven high will bring 5700's external PHY out of reset.
11427 * It is also used as eeprom write protect on LOMs.
11428 */
11429 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11430 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11431 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11432 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11433 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070011434 /* Unused GPIO3 must be driven as output on 5752 because there
11435 * are no pull-up resistors on unused GPIO pins.
11436 */
11437 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11438 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070011439
Michael Chanaf36e6b2006-03-23 01:28:06 -080011440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11441 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11442
Linus Torvalds1da177e2005-04-16 15:20:36 -070011443 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080011444 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011445 if (err) {
11446 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11447 pci_name(tp->pdev));
11448 return err;
11449 }
11450
11451 /* 5700 B0 chips do not support checksumming correctly due
11452 * to hardware bugs.
11453 */
11454 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11455 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11456
Linus Torvalds1da177e2005-04-16 15:20:36 -070011457 /* Derive initial jumbo mode from MTU assigned in
11458 * ether_setup() via the alloc_etherdev() call
11459 */
Michael Chan0f893dc2005-07-25 12:30:38 -070011460 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070011461 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070011462 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011463
11464 /* Determine WakeOnLan speed to use. */
11465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11466 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11467 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11468 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11469 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11470 } else {
11471 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11472 }
11473
11474 /* A few boards don't want Ethernet@WireSpeed phy feature */
11475 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11476 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11477 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070011478 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070011479 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070011480 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011481 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11482
11483 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11484 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11485 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11486 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11487 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11488
Michael Chanc424cb22006-04-29 18:56:34 -070011489 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080011494 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11495 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11496 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080011497 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11498 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11499 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Michael Chanc424cb22006-04-29 18:56:34 -070011500 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11501 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011502
Linus Torvalds1da177e2005-04-16 15:20:36 -070011503 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011504 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11505 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11506 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11507
11508 /* Initialize MAC MI mode, polling disabled. */
11509 tw32_f(MAC_MI_MODE, tp->mi_mode);
11510 udelay(80);
11511
11512 /* Initialize data/descriptor byte/word swapping. */
11513 val = tr32(GRC_MODE);
11514 val &= GRC_MODE_HOST_STACKUP;
11515 tw32(GRC_MODE, val | tp->grc_mode);
11516
11517 tg3_switch_clocks(tp);
11518
11519 /* Clear this out for sanity. */
11520 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11521
11522 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11523 &pci_state_reg);
11524 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11525 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11526 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11527
11528 if (chiprevid == CHIPREV_ID_5701_A0 ||
11529 chiprevid == CHIPREV_ID_5701_B0 ||
11530 chiprevid == CHIPREV_ID_5701_B2 ||
11531 chiprevid == CHIPREV_ID_5701_B5) {
11532 void __iomem *sram_base;
11533
11534 /* Write some dummy words into the SRAM status block
11535			 * area and see if it reads back correctly.  If the value
11536			 * read back is bad, force-enable the PCIX workaround.
11537 */
11538 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11539
11540 writel(0x00000000, sram_base);
11541 writel(0x00000000, sram_base + 4);
11542 writel(0xffffffff, sram_base + 4);
11543 if (readl(sram_base) != 0x00000000)
11544 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11545 }
11546 }
11547
11548 udelay(50);
11549 tg3_nvram_init(tp);
11550
11551 grc_misc_cfg = tr32(GRC_MISC_CFG);
11552 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11553
Linus Torvalds1da177e2005-04-16 15:20:36 -070011554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11555 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11556 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11557 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11558
David S. Millerfac9b832005-05-18 22:46:34 -070011559 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11560 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11561 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11562 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11563 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11564 HOSTCC_MODE_CLRTICK_TXBD);
11565
11566 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11567 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11568 tp->misc_host_ctrl);
11569 }
11570
Linus Torvalds1da177e2005-04-16 15:20:36 -070011571 /* these are limited to 10/100 only */
11572 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11573 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11574 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11575 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11576 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11577 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11578 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11579 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11580 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080011581 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11582 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070011583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011584 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11585
11586 err = tg3_phy_probe(tp);
11587 if (err) {
11588 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11589 pci_name(tp->pdev), err);
11590 /* ... but do not return immediately ... */
11591 }
11592
11593 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080011594 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011595
11596 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11597 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11598 } else {
11599 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11600 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11601 else
11602 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11603 }
11604
11605 /* 5700 {AX,BX} chips have a broken status block link
11606 * change bit implementation, so we must use the
11607 * status register in those cases.
11608 */
11609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11610 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11611 else
11612 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11613
11614	/* The led_ctrl is set during tg3_phy_probe; here we might
11615 * have to force the link status polling mechanism based
11616 * upon subsystem IDs.
11617 */
11618 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070011619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011620 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11621 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11622 TG3_FLAG_USE_LINKCHG_REG);
11623 }
11624
11625 /* For all SERDES we poll the MAC status register. */
11626 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11627 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11628 else
11629 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11630
Michael Chan5a6f3072006-03-20 22:28:05 -080011631 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070011632 * straddle the 4GB address boundary in some cases.
11633 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080011634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080011639 tp->dev->hard_start_xmit = tg3_start_xmit;
11640 else
11641 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011642
11643 tp->rx_offset = 2;
11644 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11645 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11646 tp->rx_offset = 0;
11647
Michael Chanf92905d2006-06-29 20:14:29 -070011648 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11649
11650 /* Increment the rx prod index on the rx std ring by at most
11651	 * 8 for these chips to work around hw errata.
11652 */
11653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11656 tp->rx_std_max_post = 8;
11657
Matt Carlson8ed5d972007-05-07 00:25:49 -070011658 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11659 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11660 PCIE_PWR_MGMT_L1_THRESH_MSK;
11661
Linus Torvalds1da177e2005-04-16 15:20:36 -070011662 return err;
11663}
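
/* Illustrative sketch, not part of the original driver: the 4GB-boundary
 * note above (used when choosing between tg3_start_xmit and
 * tg3_start_xmit_dma_bug) comes down to a simple address test.  A TX buffer
 * straddles a 4GB boundary when the first and last bytes of its DMA mapping
 * differ in the upper 32 address bits.  The helper name below is
 * hypothetical and only shows the check; len is assumed to be non-zero.
 */
static inline int tg3_example_crosses_4g(u64 base, u32 len)
{
	u64 last = base + len - 1;

	/* Different upper 32 bits => the mapping wraps past a 4GB mark. */
	return (base >> 32) != (last >> 32);
}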
11664
David S. Miller49b6e95f2007-03-29 01:38:42 -070011665#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070011666static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11667{
11668 struct net_device *dev = tp->dev;
11669 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070011670 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070011671 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070011672 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011673
David S. Miller49b6e95f2007-03-29 01:38:42 -070011674 addr = of_get_property(dp, "local-mac-address", &len);
11675 if (addr && len == 6) {
11676 memcpy(dev->dev_addr, addr, 6);
11677 memcpy(dev->perm_addr, dev->dev_addr, 6);
11678 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011679 }
11680 return -ENODEV;
11681}
11682
11683static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11684{
11685 struct net_device *dev = tp->dev;
11686
11687 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070011688 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011689 return 0;
11690}
11691#endif
11692
11693static int __devinit tg3_get_device_address(struct tg3 *tp)
11694{
11695 struct net_device *dev = tp->dev;
11696 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080011697 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011698
David S. Miller49b6e95f2007-03-29 01:38:42 -070011699#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070011700 if (!tg3_get_macaddr_sparc(tp))
11701 return 0;
11702#endif
11703
11704 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070011705 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070011706 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011707 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11708 mac_offset = 0xcc;
11709 if (tg3_nvram_lock(tp))
11710 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11711 else
11712 tg3_nvram_unlock(tp);
11713 }
Michael Chanb5d37722006-09-27 16:06:21 -070011714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11715 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011716
11717 /* First try to get it from MAC address mailbox. */
11718 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11719 if ((hi >> 16) == 0x484b) {
11720 dev->dev_addr[0] = (hi >> 8) & 0xff;
11721 dev->dev_addr[1] = (hi >> 0) & 0xff;
11722
11723 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11724 dev->dev_addr[2] = (lo >> 24) & 0xff;
11725 dev->dev_addr[3] = (lo >> 16) & 0xff;
11726 dev->dev_addr[4] = (lo >> 8) & 0xff;
11727 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011728
Michael Chan008652b2006-03-27 23:14:53 -080011729 /* Some old bootcode may report a 0 MAC address in SRAM */
11730 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11731 }
11732 if (!addr_ok) {
11733 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070011734 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080011735 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11736 dev->dev_addr[0] = ((hi >> 16) & 0xff);
11737 dev->dev_addr[1] = ((hi >> 24) & 0xff);
11738 dev->dev_addr[2] = ((lo >> 0) & 0xff);
11739 dev->dev_addr[3] = ((lo >> 8) & 0xff);
11740 dev->dev_addr[4] = ((lo >> 16) & 0xff);
11741 dev->dev_addr[5] = ((lo >> 24) & 0xff);
11742 }
11743 /* Finally just fetch it out of the MAC control regs. */
11744 else {
11745 hi = tr32(MAC_ADDR_0_HIGH);
11746 lo = tr32(MAC_ADDR_0_LOW);
11747
11748 dev->dev_addr[5] = lo & 0xff;
11749 dev->dev_addr[4] = (lo >> 8) & 0xff;
11750 dev->dev_addr[3] = (lo >> 16) & 0xff;
11751 dev->dev_addr[2] = (lo >> 24) & 0xff;
11752 dev->dev_addr[1] = hi & 0xff;
11753 dev->dev_addr[0] = (hi >> 8) & 0xff;
11754 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011755 }
11756
11757 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11758#ifdef CONFIG_SPARC64
11759 if (!tg3_get_default_macaddr_sparc(tp))
11760 return 0;
11761#endif
11762 return -EINVAL;
11763 }
John W. Linville2ff43692005-09-12 14:44:20 -070011764 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011765 return 0;
11766}
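
/* Illustrative sketch, not part of the original driver: how the MAC address
 * mailbox words read above are unpacked.  The top 16 bits of 'hi' must carry
 * the 0x484b signature (ASCII 'H', 'K') for the SRAM copy to be trusted; the
 * remaining 48 bits hold the station address with the most significant
 * bytes in 'hi'.  The helper name below is hypothetical.
 */
static int tg3_example_unpack_mac_mbox(u32 hi, u32 lo, u8 *addr)
{
	if ((hi >> 16) != 0x484b)
		return -ENODEV;		/* no signature, SRAM copy not valid */

	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
	return 0;
}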
11767
David S. Miller59e6b432005-05-18 22:50:10 -070011768#define BOUNDARY_SINGLE_CACHELINE 1
11769#define BOUNDARY_MULTI_CACHELINE 2
11770
11771static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11772{
11773 int cacheline_size;
11774 u8 byte;
11775 int goal;
11776
11777 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11778 if (byte == 0)
11779 cacheline_size = 1024;
11780 else
11781 cacheline_size = (int) byte * 4;
11782
11783 /* On 5703 and later chips, the boundary bits have no
11784 * effect.
11785 */
11786 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11787 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11788 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11789 goto out;
11790
11791#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
11792 goal = BOUNDARY_MULTI_CACHELINE;
11793#else
11794#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
11795 goal = BOUNDARY_SINGLE_CACHELINE;
11796#else
11797 goal = 0;
11798#endif
11799#endif
11800
11801 if (!goal)
11802 goto out;
11803
11804 /* PCI controllers on most RISC systems tend to disconnect
11805 * when a device tries to burst across a cache-line boundary.
11806 * Therefore, letting tg3 do so just wastes PCI bandwidth.
11807 *
11808 * Unfortunately, for PCI-E there are only limited
11809 * write-side controls for this, and thus for reads
11810 * we will still get the disconnects. We'll also waste
11811 * these PCI cycles for both read and write for chips
11812 * other than 5700 and 5701 which do not implement the
11813 * boundary bits.
11814 */
11815 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11816 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11817 switch (cacheline_size) {
11818 case 16:
11819 case 32:
11820 case 64:
11821 case 128:
11822 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11823 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11824 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11825 } else {
11826 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11827 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11828 }
11829 break;
11830
11831 case 256:
11832 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11833 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11834 break;
11835
11836 default:
11837 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11838 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11839 break;
11840		}
11841 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11842 switch (cacheline_size) {
11843 case 16:
11844 case 32:
11845 case 64:
11846 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11847 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11848 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11849 break;
11850 }
11851 /* fallthrough */
11852 case 128:
11853 default:
11854 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11855 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11856 break;
11857		}
11858 } else {
11859 switch (cacheline_size) {
11860 case 16:
11861 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11862 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11863 DMA_RWCTRL_WRITE_BNDRY_16);
11864 break;
11865 }
11866 /* fallthrough */
11867 case 32:
11868 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11869 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11870 DMA_RWCTRL_WRITE_BNDRY_32);
11871 break;
11872 }
11873 /* fallthrough */
11874 case 64:
11875 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11876 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11877 DMA_RWCTRL_WRITE_BNDRY_64);
11878 break;
11879 }
11880 /* fallthrough */
11881 case 128:
11882 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11883 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11884 DMA_RWCTRL_WRITE_BNDRY_128);
11885 break;
11886 }
11887 /* fallthrough */
11888 case 256:
11889 val |= (DMA_RWCTRL_READ_BNDRY_256 |
11890 DMA_RWCTRL_WRITE_BNDRY_256);
11891 break;
11892 case 512:
11893 val |= (DMA_RWCTRL_READ_BNDRY_512 |
11894 DMA_RWCTRL_WRITE_BNDRY_512);
11895 break;
11896 case 1024:
11897 default:
11898 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11899 DMA_RWCTRL_WRITE_BNDRY_1024);
11900 break;
11901		}
11902 }
11903
11904out:
11905 return val;
11906}
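
/* Illustrative sketch, not part of the original driver: the cache line size
 * used above comes from the PCI_CACHE_LINE_SIZE config byte, which counts
 * 32-bit words.  tg3_calc_dma_bndry() therefore multiplies it by 4 and treats
 * a value of zero as the 1024-byte worst case.  The helper is hypothetical.
 */
static inline int tg3_example_cacheline_bytes(u8 cls_reg)
{
	return cls_reg ? (int) cls_reg * 4 : 1024;
}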
11907
Linus Torvalds1da177e2005-04-16 15:20:36 -070011908static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11909{
11910 struct tg3_internal_buffer_desc test_desc;
11911 u32 sram_dma_descs;
11912 int i, ret;
11913
11914 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11915
11916 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11917 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11918 tw32(RDMAC_STATUS, 0);
11919 tw32(WDMAC_STATUS, 0);
11920
11921 tw32(BUFMGR_MODE, 0);
11922 tw32(FTQ_RESET, 0);
11923
11924 test_desc.addr_hi = ((u64) buf_dma) >> 32;
11925 test_desc.addr_lo = buf_dma & 0xffffffff;
11926 test_desc.nic_mbuf = 0x00002100;
11927 test_desc.len = size;
11928
11929 /*
11930	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
11931 * the *second* time the tg3 driver was getting loaded after an
11932 * initial scan.
11933 *
11934 * Broadcom tells me:
11935 * ...the DMA engine is connected to the GRC block and a DMA
11936 * reset may affect the GRC block in some unpredictable way...
11937 * The behavior of resets to individual blocks has not been tested.
11938 *
11939 * Broadcom noted the GRC reset will also reset all sub-components.
11940 */
11941 if (to_device) {
11942 test_desc.cqid_sqid = (13 << 8) | 2;
11943
11944 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11945 udelay(40);
11946 } else {
11947 test_desc.cqid_sqid = (16 << 8) | 7;
11948
11949 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11950 udelay(40);
11951 }
11952 test_desc.flags = 0x00000005;
11953
11954 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11955 u32 val;
11956
11957 val = *(((u32 *)&test_desc) + i);
11958 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11959 sram_dma_descs + (i * sizeof(u32)));
11960 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11961 }
11962 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11963
11964 if (to_device) {
11965 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11966 } else {
11967 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11968 }
11969
11970 ret = -ENODEV;
11971 for (i = 0; i < 40; i++) {
11972 u32 val;
11973
11974 if (to_device)
11975 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11976 else
11977 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11978 if ((val & 0xffff) == sram_dma_descs) {
11979 ret = 0;
11980 break;
11981 }
11982
11983 udelay(100);
11984 }
11985
11986 return ret;
11987}
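
/* Illustrative sketch, not part of the original driver: the completion wait
 * at the end of tg3_do_test_dma() is a bounded poll -- re-read a FIFO
 * enqueue/dequeue register until its low 16 bits equal the descriptor
 * address, giving up after 40 polls of 100us.  The helper below is
 * hypothetical; it takes the register reader as a callback so the pattern
 * stands on its own.
 */
static int tg3_example_poll_for_match(u32 (*read_status)(struct tg3 *),
				      struct tg3 *tp, u32 expected, int tries)
{
	while (tries-- > 0) {
		if ((read_status(tp) & 0xffff) == expected)
			return 0;	/* descriptor consumed, DMA done */
		udelay(100);
	}
	return -ENODEV;			/* timed out */
}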
11988
David S. Millerded73402005-05-23 13:59:47 -070011989#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070011990
11991static int __devinit tg3_test_dma(struct tg3 *tp)
11992{
11993 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070011994 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011995 int ret;
11996
11997 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11998 if (!buf) {
11999 ret = -ENOMEM;
12000 goto out_nofree;
12001 }
12002
12003 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12004 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12005
David S. Miller59e6b432005-05-18 22:50:10 -070012006 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012007
12008 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12009 /* DMA read watermark not used on PCIE */
12010 tp->dma_rwctrl |= 0x00180000;
12011 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012014 tp->dma_rwctrl |= 0x003f0000;
12015 else
12016 tp->dma_rwctrl |= 0x003f000f;
12017 } else {
12018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12020 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080012021 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012022
Michael Chan4a29cc22006-03-19 13:21:12 -080012023 /* If the 5704 is behind the EPB bridge, we can
12024 * do the less restrictive ONE_DMA workaround for
12025 * better performance.
12026 */
12027 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12028 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12029 tp->dma_rwctrl |= 0x8000;
12030 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012031 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12032
Michael Chan49afdeb2007-02-13 12:17:03 -080012033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12034 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070012035 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080012036 tp->dma_rwctrl |=
12037 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12038 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12039 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070012040 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12041 /* 5780 always in PCIX mode */
12042 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070012043 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12044 /* 5714 always in PCIX mode */
12045 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012046 } else {
12047 tp->dma_rwctrl |= 0x001b000f;
12048 }
12049 }
12050
12051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12053 tp->dma_rwctrl &= 0xfffffff0;
12054
12055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12057 /* Remove this if it causes problems for some boards. */
12058 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12059
12060 /* On 5700/5701 chips, we need to set this bit.
12061 * Otherwise the chip will issue cacheline transactions
12062 * to streamable DMA memory with not all the byte
12063 * enables turned on. This is an error on several
12064 * RISC PCI controllers, in particular sparc64.
12065 *
12066 * On 5703/5704 chips, this bit has been reassigned
12067 * a different meaning. In particular, it is used
12068 * on those chips to enable a PCI-X workaround.
12069 */
12070 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12071 }
12072
12073 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12074
12075#if 0
12076 /* Unneeded, already done by tg3_get_invariants. */
12077 tg3_switch_clocks(tp);
12078#endif
12079
12080 ret = 0;
12081 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12082 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12083 goto out;
12084
David S. Miller59e6b432005-05-18 22:50:10 -070012085 /* It is best to perform DMA test with maximum write burst size
12086 * to expose the 5700/5701 write DMA bug.
12087 */
12088 saved_dma_rwctrl = tp->dma_rwctrl;
12089 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12090 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12091
Linus Torvalds1da177e2005-04-16 15:20:36 -070012092 while (1) {
12093 u32 *p = buf, i;
12094
12095 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12096 p[i] = i;
12097
12098 /* Send the buffer to the chip. */
12099 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12100 if (ret) {
12101			printk(KERN_ERR "tg3_test_dma() Writing the buffer failed %d\n", ret);
12102 break;
12103 }
12104
12105#if 0
12106 /* validate data reached card RAM correctly. */
12107 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12108 u32 val;
12109 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12110 if (le32_to_cpu(val) != p[i]) {
12111 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12112 /* ret = -ENODEV here? */
12113 }
12114 p[i] = 0;
12115 }
12116#endif
12117 /* Now read it back. */
12118 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12119 if (ret) {
12120			printk(KERN_ERR "tg3_test_dma() Reading the buffer failed %d\n", ret);
12121
12122 break;
12123 }
12124
12125 /* Verify it. */
12126 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12127 if (p[i] == i)
12128 continue;
12129
David S. Miller59e6b432005-05-18 22:50:10 -070012130 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12131 DMA_RWCTRL_WRITE_BNDRY_16) {
12132 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012133 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12134 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12135 break;
12136 } else {
12137 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12138 ret = -ENODEV;
12139 goto out;
12140 }
12141 }
12142
12143 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12144 /* Success. */
12145 ret = 0;
12146 break;
12147 }
12148 }
David S. Miller59e6b432005-05-18 22:50:10 -070012149 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12150 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070012151 static struct pci_device_id dma_wait_state_chipsets[] = {
12152 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12153 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12154 { },
12155 };
12156
David S. Miller59e6b432005-05-18 22:50:10 -070012157 /* DMA test passed without adjusting DMA boundary,
Michael Chan6d1cfba2005-06-08 14:13:14 -070012158 * now look for chipsets that are known to expose the
12159 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070012160 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070012161 if (pci_dev_present(dma_wait_state_chipsets)) {
12162 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12163 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12164 }
12165 else
12166 /* Safe to use the calculated DMA boundary. */
12167 tp->dma_rwctrl = saved_dma_rwctrl;
12168
David S. Miller59e6b432005-05-18 22:50:10 -070012169 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012171
12172out:
12173 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12174out_nofree:
12175 return ret;
12176}
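
/* Illustrative sketch, not part of the original driver: the host side of the
 * DMA test above just round-trips an index pattern through the chip.  A
 * verify pass that returns the first mismatching word makes it easy to
 * report both the offset and the corrupted value.  The helper is
 * hypothetical.
 */
static int tg3_example_verify_pattern(const u32 *buf, int words)
{
	int i;

	for (i = 0; i < words; i++) {
		if (buf[i] != (u32) i)
			return i;	/* index of first corrupted word */
	}

	return -1;			/* pattern came back intact */
}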
12177
12178static void __devinit tg3_init_link_config(struct tg3 *tp)
12179{
12180 tp->link_config.advertising =
12181 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12182 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12183 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12184 ADVERTISED_Autoneg | ADVERTISED_MII);
12185 tp->link_config.speed = SPEED_INVALID;
12186 tp->link_config.duplex = DUPLEX_INVALID;
12187 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012188 tp->link_config.active_speed = SPEED_INVALID;
12189 tp->link_config.active_duplex = DUPLEX_INVALID;
12190 tp->link_config.phy_is_low_power = 0;
12191 tp->link_config.orig_speed = SPEED_INVALID;
12192 tp->link_config.orig_duplex = DUPLEX_INVALID;
12193 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12194}
12195
12196static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12197{
Michael Chanfdfec1722005-07-25 12:31:48 -070012198 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12199 tp->bufmgr_config.mbuf_read_dma_low_water =
12200 DEFAULT_MB_RDMA_LOW_WATER_5705;
12201 tp->bufmgr_config.mbuf_mac_rx_low_water =
12202 DEFAULT_MB_MACRX_LOW_WATER_5705;
12203 tp->bufmgr_config.mbuf_high_water =
12204 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070012205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12206 tp->bufmgr_config.mbuf_mac_rx_low_water =
12207 DEFAULT_MB_MACRX_LOW_WATER_5906;
12208 tp->bufmgr_config.mbuf_high_water =
12209 DEFAULT_MB_HIGH_WATER_5906;
12210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012211
Michael Chanfdfec1722005-07-25 12:31:48 -070012212 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12213 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12214 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12215 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12216 tp->bufmgr_config.mbuf_high_water_jumbo =
12217 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12218 } else {
12219 tp->bufmgr_config.mbuf_read_dma_low_water =
12220 DEFAULT_MB_RDMA_LOW_WATER;
12221 tp->bufmgr_config.mbuf_mac_rx_low_water =
12222 DEFAULT_MB_MACRX_LOW_WATER;
12223 tp->bufmgr_config.mbuf_high_water =
12224 DEFAULT_MB_HIGH_WATER;
12225
12226 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12227 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12228 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12229 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12230 tp->bufmgr_config.mbuf_high_water_jumbo =
12231 DEFAULT_MB_HIGH_WATER_JUMBO;
12232 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012233
12234 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12235 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12236}
12237
12238static char * __devinit tg3_phy_string(struct tg3 *tp)
12239{
12240 switch (tp->phy_id & PHY_ID_MASK) {
12241 case PHY_ID_BCM5400: return "5400";
12242 case PHY_ID_BCM5401: return "5401";
12243 case PHY_ID_BCM5411: return "5411";
12244 case PHY_ID_BCM5701: return "5701";
12245 case PHY_ID_BCM5703: return "5703";
12246 case PHY_ID_BCM5704: return "5704";
12247 case PHY_ID_BCM5705: return "5705";
12248 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070012249 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070012250 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070012251 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080012252 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad12006-03-20 22:27:35 -080012253 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070012254 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070012255 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070012256 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070012257 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070012258 case PHY_ID_BCM8002: return "8002/serdes";
12259 case 0: return "serdes";
12260 default: return "unknown";
12261	}
12262}
12263
Michael Chanf9804dd2005-09-27 12:13:10 -070012264static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12265{
12266 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12267 strcpy(str, "PCI Express");
12268 return str;
12269 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12270 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12271
12272 strcpy(str, "PCIX:");
12273
12274 if ((clock_ctrl == 7) ||
12275 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12276 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12277 strcat(str, "133MHz");
12278 else if (clock_ctrl == 0)
12279 strcat(str, "33MHz");
12280 else if (clock_ctrl == 2)
12281 strcat(str, "50MHz");
12282 else if (clock_ctrl == 4)
12283 strcat(str, "66MHz");
12284 else if (clock_ctrl == 6)
12285 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070012286 } else {
12287 strcpy(str, "PCI:");
12288 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12289 strcat(str, "66MHz");
12290 else
12291 strcat(str, "33MHz");
12292 }
12293 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12294 strcat(str, ":32-bit");
12295 else
12296 strcat(str, ":64-bit");
12297 return str;
12298}
12299
Michael Chan8c2dc7e2005-12-19 16:26:02 -080012300static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012301{
12302 struct pci_dev *peer;
12303 unsigned int func, devnr = tp->pdev->devfn & ~7;
12304
12305 for (func = 0; func < 8; func++) {
12306 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12307 if (peer && peer != tp->pdev)
12308 break;
12309 pci_dev_put(peer);
12310 }
Michael Chan16fe9d72005-12-13 21:09:54 -080012311 /* 5704 can be configured in single-port mode, set peer to
12312 * tp->pdev in that case.
12313 */
12314 if (!peer) {
12315 peer = tp->pdev;
12316 return peer;
12317 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012318
12319 /*
12320 * We don't need to keep the refcount elevated; there's no way
12321 * to remove one half of this device without removing the other
12322 */
12323 pci_dev_put(peer);
12324
12325 return peer;
12326}
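
/* Illustrative sketch, not part of the original driver: a PCI devfn packs
 * the slot number in bits 7:3 and the function number in bits 2:0, which is
 * why tg3_find_peer() masks off the low three bits and scans functions 0-7
 * of the same slot to locate the other port of a dual-port 5704.  The helper
 * name below is hypothetical.
 */
static inline unsigned int tg3_example_sibling_devfn(unsigned int devfn,
						     unsigned int func)
{
	return (devfn & ~7U) | (func & 7U);
}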
12327
David S. Miller15f98502005-05-18 22:49:26 -070012328static void __devinit tg3_init_coal(struct tg3 *tp)
12329{
12330 struct ethtool_coalesce *ec = &tp->coal;
12331
12332 memset(ec, 0, sizeof(*ec));
12333 ec->cmd = ETHTOOL_GCOALESCE;
12334 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12335 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12336 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12337 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12338 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12339 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12340 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12341 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12342 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12343
12344 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12345 HOSTCC_MODE_CLRTICK_TXBD)) {
12346 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12347 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12348 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12349 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12350 }
Michael Chand244c892005-07-05 14:42:33 -070012351
12352 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12353 ec->rx_coalesce_usecs_irq = 0;
12354 ec->tx_coalesce_usecs_irq = 0;
12355 ec->stats_block_coalesce_usecs = 0;
12356 }
David S. Miller15f98502005-05-18 22:49:26 -070012357}
12358
Linus Torvalds1da177e2005-04-16 15:20:36 -070012359static int __devinit tg3_init_one(struct pci_dev *pdev,
12360 const struct pci_device_id *ent)
12361{
12362 static int tg3_version_printed = 0;
12363 unsigned long tg3reg_base, tg3reg_len;
12364 struct net_device *dev;
12365 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080012366 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070012367 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080012368 u64 dma_mask, persist_dma_mask;
Joe Perchesd6645372007-12-20 04:06:59 -080012369 DECLARE_MAC_BUF(mac);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012370
12371 if (tg3_version_printed++ == 0)
12372 printk(KERN_INFO "%s", version);
12373
12374 err = pci_enable_device(pdev);
12375 if (err) {
12376 printk(KERN_ERR PFX "Cannot enable PCI device, "
12377 "aborting.\n");
12378 return err;
12379 }
12380
12381 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12382 printk(KERN_ERR PFX "Cannot find proper PCI device "
12383 "base address, aborting.\n");
12384 err = -ENODEV;
12385 goto err_out_disable_pdev;
12386 }
12387
12388 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12389 if (err) {
12390 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12391 "aborting.\n");
12392 goto err_out_disable_pdev;
12393 }
12394
12395 pci_set_master(pdev);
12396
12397 /* Find power-management capability. */
12398 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12399 if (pm_cap == 0) {
12400 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12401 "aborting.\n");
12402 err = -EIO;
12403 goto err_out_free_res;
12404 }
12405
Linus Torvalds1da177e2005-04-16 15:20:36 -070012406 tg3reg_base = pci_resource_start(pdev, 0);
12407 tg3reg_len = pci_resource_len(pdev, 0);
12408
12409 dev = alloc_etherdev(sizeof(*tp));
12410 if (!dev) {
12411 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12412 err = -ENOMEM;
12413 goto err_out_free_res;
12414 }
12415
Linus Torvalds1da177e2005-04-16 15:20:36 -070012416 SET_NETDEV_DEV(dev, &pdev->dev);
12417
Linus Torvalds1da177e2005-04-16 15:20:36 -070012418#if TG3_VLAN_TAG_USED
12419 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12420 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012421#endif
12422
12423 tp = netdev_priv(dev);
12424 tp->pdev = pdev;
12425 tp->dev = dev;
12426 tp->pm_cap = pm_cap;
12427 tp->mac_mode = TG3_DEF_MAC_MODE;
12428 tp->rx_mode = TG3_DEF_RX_MODE;
12429 tp->tx_mode = TG3_DEF_TX_MODE;
12430 tp->mi_mode = MAC_MI_MODE_BASE;
12431 if (tg3_debug > 0)
12432 tp->msg_enable = tg3_debug;
12433 else
12434 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12435
12436 /* The word/byte swap controls here control register access byte
12437 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12438 * setting below.
12439 */
12440 tp->misc_host_ctrl =
12441 MISC_HOST_CTRL_MASK_PCI_INT |
12442 MISC_HOST_CTRL_WORD_SWAP |
12443 MISC_HOST_CTRL_INDIR_ACCESS |
12444 MISC_HOST_CTRL_PCISTATE_RW;
12445
12446 /* The NONFRM (non-frame) byte/word swap controls take effect
12447 * on descriptor entries, anything which isn't packet data.
12448 *
12449 * The StrongARM chips on the board (one for tx, one for rx)
12450 * are running in big-endian mode.
12451 */
12452 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12453 GRC_MODE_WSWAP_NONFRM_DATA);
12454#ifdef __BIG_ENDIAN
12455 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12456#endif
12457 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012458 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000012459 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012460
12461 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010012462 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012463 printk(KERN_ERR PFX "Cannot map device registers, "
12464 "aborting.\n");
12465 err = -ENOMEM;
12466 goto err_out_free_dev;
12467 }
12468
12469 tg3_init_link_config(tp);
12470
Linus Torvalds1da177e2005-04-16 15:20:36 -070012471 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12472 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12473 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12474
12475 dev->open = tg3_open;
12476 dev->stop = tg3_close;
12477 dev->get_stats = tg3_get_stats;
12478 dev->set_multicast_list = tg3_set_rx_mode;
12479 dev->set_mac_address = tg3_set_mac_addr;
12480 dev->do_ioctl = tg3_ioctl;
12481 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070012482 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012483 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012484 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12485 dev->change_mtu = tg3_change_mtu;
12486 dev->irq = pdev->irq;
12487#ifdef CONFIG_NET_POLL_CONTROLLER
12488 dev->poll_controller = tg3_poll_controller;
12489#endif
12490
12491 err = tg3_get_invariants(tp);
12492 if (err) {
12493 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12494 "aborting.\n");
12495 goto err_out_iounmap;
12496 }
12497
Michael Chan4a29cc22006-03-19 13:21:12 -080012498 /* The EPB bridge inside 5714, 5715, and 5780 and any
12499 * device behind the EPB cannot support DMA addresses > 40-bit.
Michael Chan72f2afb2006-03-06 19:28:35 -080012500 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12501 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12502 * do DMA address check in tg3_start_xmit().
12503 */
Michael Chan4a29cc22006-03-19 13:21:12 -080012504 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12505 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12506 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080012507 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12508#ifdef CONFIG_HIGHMEM
12509 dma_mask = DMA_64BIT_MASK;
12510#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080012511 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080012512 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12513
12514 /* Configure DMA attributes. */
12515 if (dma_mask > DMA_32BIT_MASK) {
12516 err = pci_set_dma_mask(pdev, dma_mask);
12517 if (!err) {
12518 dev->features |= NETIF_F_HIGHDMA;
12519 err = pci_set_consistent_dma_mask(pdev,
12520 persist_dma_mask);
12521 if (err < 0) {
12522 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12523 "DMA for consistent allocations\n");
12524 goto err_out_iounmap;
12525 }
12526 }
12527 }
12528 if (err || dma_mask == DMA_32BIT_MASK) {
12529 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12530 if (err) {
12531 printk(KERN_ERR PFX "No usable DMA configuration, "
12532 "aborting.\n");
12533 goto err_out_iounmap;
12534 }
12535 }
12536
Michael Chanfdfec1722005-07-25 12:31:48 -070012537 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012538
Linus Torvalds1da177e2005-04-16 15:20:36 -070012539 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12540 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12541 }
12542 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12544 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
Michael Chanc7835a72006-11-15 21:14:42 -080012545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -070012546 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12547 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12548 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080012549 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012550 }
12551
Michael Chan4e3a7aa2006-03-20 17:47:44 -080012552 /* TSO is on by default on chips that support hardware TSO.
12553 * Firmware TSO on older chips gives lower performance, so it
12554 * is off by default, but can be enabled using ethtool.
12555 */
Michael Chanb0026622006-07-03 19:42:14 -070012556 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012557 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070012558 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12559 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070012560 dev->features |= NETIF_F_TSO6;
Matt Carlson9936bcf2007-10-10 18:03:07 -070012561 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12562 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070012563 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012564
Linus Torvalds1da177e2005-04-16 15:20:36 -070012565
12566 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12567 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12568 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12569 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12570 tp->rx_pending = 63;
12571 }
12572
Linus Torvalds1da177e2005-04-16 15:20:36 -070012573 err = tg3_get_device_address(tp);
12574 if (err) {
12575 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12576 "aborting.\n");
12577 goto err_out_iounmap;
12578 }
12579
Matt Carlson0d3031d2007-10-10 18:02:43 -070012580 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12581 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12582 printk(KERN_ERR PFX "Cannot find proper PCI device "
12583 "base address for APE, aborting.\n");
12584 err = -ENODEV;
12585 goto err_out_iounmap;
12586 }
12587
12588 tg3reg_base = pci_resource_start(pdev, 2);
12589 tg3reg_len = pci_resource_len(pdev, 2);
12590
12591 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12592		if (!tp->aperegs) {
12593 printk(KERN_ERR PFX "Cannot map APE registers, "
12594 "aborting.\n");
12595 err = -ENOMEM;
12596 goto err_out_iounmap;
12597 }
12598
12599 tg3_ape_lock_init(tp);
12600 }
12601
Matt Carlsonc88864d2007-11-12 21:07:01 -080012602 /*
12603	 * Reset the chip in case the UNDI or EFI driver did not shut it
12604	 * down.  The DMA self test will enable WDMAC, and we would then
12605	 * see (spurious) pending DMA on the PCI bus at that point.
12606 */
12607 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12608 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12609 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12610 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12611 }
12612
12613 err = tg3_test_dma(tp);
12614 if (err) {
12615 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12616 goto err_out_apeunmap;
12617 }
12618
12619 /* Tigon3 can do ipv4 only... and some chips have buggy
12620 * checksumming.
12621 */
12622 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12623 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12628 dev->features |= NETIF_F_IPV6_CSUM;
12629
12630 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12631 } else
12632 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12633
12634 /* flow control autonegotiation is default behavior */
12635 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080012636 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080012637
12638 tg3_init_coal(tp);
12639
Michael Chanc49a1562006-12-17 17:07:29 -080012640 pci_set_drvdata(pdev, dev);
12641
Linus Torvalds1da177e2005-04-16 15:20:36 -070012642 err = register_netdev(dev);
12643 if (err) {
12644 printk(KERN_ERR PFX "Cannot register net device, "
12645 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070012646 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012647 }
12648
Joe Perchesd6645372007-12-20 04:06:59 -080012649 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12650 "(%s) %s Ethernet %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070012651 dev->name,
12652 tp->board_part_number,
12653 tp->pci_chip_rev_id,
12654 tg3_phy_string(tp),
Michael Chanf9804dd2005-09-27 12:13:10 -070012655 tg3_bus_string(tp, str),
Michael Chancbb45d22006-12-07 00:24:09 -080012656 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12657 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
Joe Perchesd6645372007-12-20 04:06:59 -080012658 "10/100/1000Base-T")),
12659 print_mac(mac, dev->dev_addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -070012660
12661 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
Michael Chan1c46ae052007-03-24 20:54:37 -070012662 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070012663 dev->name,
12664 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12665 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12666 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12667 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070012668 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12669 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080012670 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12671 dev->name, tp->dma_rwctrl,
12672 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12673 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070012674
12675 return 0;
12676
Matt Carlson0d3031d2007-10-10 18:02:43 -070012677err_out_apeunmap:
12678 if (tp->aperegs) {
12679 iounmap(tp->aperegs);
12680 tp->aperegs = NULL;
12681 }
12682
Linus Torvalds1da177e2005-04-16 15:20:36 -070012683err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070012684 if (tp->regs) {
12685 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012686 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012687 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012688
12689err_out_free_dev:
12690 free_netdev(dev);
12691
12692err_out_free_res:
12693 pci_release_regions(pdev);
12694
12695err_out_disable_pdev:
12696 pci_disable_device(pdev);
12697 pci_set_drvdata(pdev, NULL);
12698 return err;
12699}
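
/* Illustrative sketch, not part of the original driver: the DMA mask choice
 * in tg3_init_one() reduces to three cases -- 5788-class parts are limited
 * to 32-bit DMA, parts with the 40-bit DMA bug (behind the EPB bridge)
 * persist at 40 bits but, under CONFIG_HIGHMEM, advertise a 64-bit streaming
 * mask and rely on the address check in tg3_start_xmit(), and everything
 * else runs with full 64-bit masks.  The helper below is hypothetical and
 * only mirrors that decision.
 */
static u64 tg3_example_pick_dma_mask(int is_5788, int has_40bit_bug,
				     int highmem, u64 *persist)
{
	if (is_5788) {
		*persist = DMA_32BIT_MASK;
		return DMA_32BIT_MASK;
	}

	if (has_40bit_bug) {
		*persist = DMA_40BIT_MASK;
		return highmem ? DMA_64BIT_MASK : DMA_40BIT_MASK;
	}

	*persist = DMA_64BIT_MASK;
	return DMA_64BIT_MASK;
}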
12700
12701static void __devexit tg3_remove_one(struct pci_dev *pdev)
12702{
12703 struct net_device *dev = pci_get_drvdata(pdev);
12704
12705 if (dev) {
12706 struct tg3 *tp = netdev_priv(dev);
12707
Michael Chan7faa0062006-02-02 17:29:28 -080012708 flush_scheduled_work();
Linus Torvalds1da177e2005-04-16 15:20:36 -070012709 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070012710 if (tp->aperegs) {
12711 iounmap(tp->aperegs);
12712 tp->aperegs = NULL;
12713 }
Michael Chan68929142005-08-09 20:17:14 -070012714 if (tp->regs) {
12715 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070012716 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070012717 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012718 free_netdev(dev);
12719 pci_release_regions(pdev);
12720 pci_disable_device(pdev);
12721 pci_set_drvdata(pdev, NULL);
12722 }
12723}
12724
12725static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12726{
12727 struct net_device *dev = pci_get_drvdata(pdev);
12728 struct tg3 *tp = netdev_priv(dev);
12729 int err;
12730
Michael Chan3e0c95f2007-08-03 20:56:54 -070012731 /* PCI register 4 needs to be saved whether netif_running() or not.
12732 * MSI address and data need to be saved if using MSI and
12733 * netif_running().
12734 */
12735 pci_save_state(pdev);
12736
Linus Torvalds1da177e2005-04-16 15:20:36 -070012737 if (!netif_running(dev))
12738 return 0;
12739
Michael Chan7faa0062006-02-02 17:29:28 -080012740 flush_scheduled_work();
Linus Torvalds1da177e2005-04-16 15:20:36 -070012741 tg3_netif_stop(tp);
12742
12743 del_timer_sync(&tp->timer);
12744
David S. Millerf47c11e2005-06-24 20:18:35 -070012745 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012746 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070012747 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012748
12749 netif_device_detach(dev);
12750
David S. Millerf47c11e2005-06-24 20:18:35 -070012751 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070012752 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080012753 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070012754 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012755
12756 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12757 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -070012758 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012759
Michael Chan6a9eba12005-12-13 21:08:58 -080012760 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070012761 if (tg3_restart_hw(tp, 1))
12762 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012763
12764 tp->timer.expires = jiffies + tp->timer_offset;
12765 add_timer(&tp->timer);
12766
12767 netif_device_attach(dev);
12768 tg3_netif_start(tp);
12769
Michael Chanb9ec6c12006-07-25 16:37:27 -070012770out:
David S. Millerf47c11e2005-06-24 20:18:35 -070012771 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012772 }
12773
12774 return err;
12775}
12776
12777static int tg3_resume(struct pci_dev *pdev)
12778{
12779 struct net_device *dev = pci_get_drvdata(pdev);
12780 struct tg3 *tp = netdev_priv(dev);
12781 int err;
12782
Michael Chan3e0c95f2007-08-03 20:56:54 -070012783 pci_restore_state(tp->pdev);
12784
Linus Torvalds1da177e2005-04-16 15:20:36 -070012785 if (!netif_running(dev))
12786 return 0;
12787
Michael Chanbc1c7562006-03-20 17:48:03 -080012788 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012789 if (err)
12790 return err;
12791
12792 netif_device_attach(dev);
12793
David S. Millerf47c11e2005-06-24 20:18:35 -070012794 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012795
Michael Chan6a9eba12005-12-13 21:08:58 -080012796 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070012797 err = tg3_restart_hw(tp, 1);
12798 if (err)
12799 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012800
12801 tp->timer.expires = jiffies + tp->timer_offset;
12802 add_timer(&tp->timer);
12803
Linus Torvalds1da177e2005-04-16 15:20:36 -070012804 tg3_netif_start(tp);
12805
Michael Chanb9ec6c12006-07-25 16:37:27 -070012806out:
David S. Millerf47c11e2005-06-24 20:18:35 -070012807 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012808
Michael Chanb9ec6c12006-07-25 16:37:27 -070012809 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012810}
12811
12812static struct pci_driver tg3_driver = {
12813 .name = DRV_MODULE_NAME,
12814 .id_table = tg3_pci_tbl,
12815 .probe = tg3_init_one,
12816 .remove = __devexit_p(tg3_remove_one),
12817 .suspend = tg3_suspend,
12818 .resume = tg3_resume
12819};
12820
12821static int __init tg3_init(void)
12822{
Jeff Garzik29917622006-08-19 17:48:59 -040012823 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012824}
12825
12826static void __exit tg3_cleanup(void)
12827{
12828 pci_unregister_driver(&tg3_driver);
12829}
12830
12831module_init(tg3_init);
12832module_exit(tg3_cleanup);