/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.92"
#define DRV_MODULE_RELDATE	"May 2, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
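
/* Register access note (added commentary): tg3_read32()/tg3_write32() touch
 * the memory-mapped register window directly, while the *_indirect_* variants
 * go through the PCI configuration-space window (TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA) under indirect_lock for chips or buses where MMIO cannot
 * be trusted.  Which pair a given device uses is selected elsewhere through
 * the tp->read32/tp->write32 function pointers referenced below.
 */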

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
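
/* Illustrative usage of the accessor macros above (commentary, not from the
 * original source): tw32() is a plain posted write, tw32_f() writes and then
 * reads back to flush the posting, and tw32_wait_f() additionally waits the
 * requested number of microseconds, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as done in tg3_switch_clocks() below.
 */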

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
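
/* Typical APE lock usage (illustrative commentary): bracket accesses to
 * memory shared with the APE firmware with
 * tg3_ape_lock(tp, TG3_APE_LOCK_MEM) / tg3_ape_unlock(tp, TG3_APE_LOCK_MEM).
 * Both calls are no-ops unless TG3_FLG3_ENABLE_APE is set for the device.
 */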

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
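
/* Added commentary: tg3_has_work() consults the host status block that the
 * chip DMAs into tp->hw_status -- a link-change bit plus per-ring producer
 * and consumer indices.  Callers such as tg3_restart_ints() skip this check
 * when tagged status mode is in use, since last_tag already conveys which
 * work has been completed.
 */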

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
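
/* Added commentary: tg3_readphy() and tg3_writephy() implement MDIO access
 * through the MAC_MI_COM register.  The PHY address, register number,
 * command and (for writes) data are packed into a single frame value,
 * MI_COM_START kicks off the transaction, and MI_COM_BUSY is polled until
 * the serial transfer completes or PHY_BUSY_LOOPS iterations expire.
 */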

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;

	/* Wait for up to 2.5 milliseconds */
	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(10);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}
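
/* Added commentary: the link update above hands 14 bytes to the ASF/UMP
 * firmware command mailbox -- the BMCR/BMSR, ADVERTISE/LPA and
 * CTRL1000/STAT1000 register pairs plus MII_PHYADDR, each word packed as
 * (high << 16) | low -- and then rings the GRC_RX_CPU_DRIVER_EVENT doorbell
 * so the management firmware notices the new data.
 */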

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}
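
/* Added commentary: the two resolve helpers above implement the standard
 * IEEE 802.3 pause resolution -- symmetric pause advertised by both ends
 * enables flow control in both directions, while the asymmetric-pause bits
 * select a single direction.  The 1000T variant works on copper (MII)
 * advertisement bits, the 1000X variant on the 1000BASE-X codeword
 * equivalents.
 */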

static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u8 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
								   remote_adv);
		else
			new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
								   remote_adv);
	} else {
		new_tg3_flags = tp->link_config.flowctrl;
	}

	tp->link_config.active_flowctrl = new_tg3_flags;

	if (new_tg3_flags & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
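
/* Added commentary: PHY DSP registers are reached indirectly --
 * tg3_phydsp_write() stores the target in MII_TG3_DSP_ADDRESS and the value
 * in MII_TG3_DSP_RW_PORT.  tg3_phy_apply_otp() above unpacks the factory
 * calibration fields from tp->phy_otp and programs them into the
 * corresponding DSP taps while the SM_DSP clock is enabled.
 */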

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit <= 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
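
/* Added commentary: the test-pattern helper above writes a known pattern
 * into each of the four DSP channels, latches it through PHY register 0x16,
 * reads it back, and sets *resetp so the caller retries with a fresh BMCR
 * reset whenever any channel miscompares.
 */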

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
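
/* Added commentary: tg3_phy_reset() reports loss of carrier, runs the
 * chip-specific reset path (the 5703/4/5 DSP workaround or a plain BMCR
 * reset), reapplies the CPMU and OTP fixups, and then finishes with the
 * per-PHY erratum tweaks, jumbo-frame bits, auto-MDIX and wirespeed
 * settings.
 */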

static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
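
/* Added commentary: tg3_frob_aux_power() drives the GRC GPIO outputs that
 * control the auxiliary (Vaux) power switch.  On dual-port 5704/5714 boards
 * the peer device is consulted so that whichever port still needs WOL or ASF
 * keeps the auxiliary rail powered.
 */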
1565
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07001566static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1567{
1568 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1569 return 1;
1570 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1571 if (speed != SPEED_10)
1572 return 1;
1573 } else if (speed == SPEED_10)
1574 return 1;
1575
1576 return 0;
1577}
1578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579static int tg3_setup_phy(struct tg3 *, int);
1580
1581#define RESET_KIND_SHUTDOWN 0
1582#define RESET_KIND_INIT 1
1583#define RESET_KIND_SUSPEND 2
1584
1585static void tg3_write_sig_post_reset(struct tg3 *, int);
1586static int tg3_halt_cpu(struct tg3 *, u32);
Michael Chan6921d202005-12-13 21:15:53 -08001587static int tg3_nvram_lock(struct tg3 *);
1588static void tg3_nvram_unlock(struct tg3 *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
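/* Power the PHY down as far as this chip allows.  Serdes devices only
 * get the SG_DIG/MAC_SERDES_CFG tweak on the 5704; the 5906 internal
 * PHY is put into IDDQ via GRC_MISC_CFG instead of a BMCR power-down;
 * parts with the 5761/5784 AX fixes first drop the 1000MB MAC clock to
 * the 12.5 setting; and a few chips skip the final BMCR_PDOWN entirely
 * to work around hardware bugs.
 */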
Michael Chan15c3b692006-03-22 01:06:52 -08001590static void tg3_power_down_phy(struct tg3 *tp)
1591{
Matt Carlsonce057f02007-11-12 21:08:03 -08001592 u32 val;
1593
Michael Chan51297242007-02-13 12:17:57 -08001594 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1596 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1597 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1598
1599 sg_dig_ctrl |=
1600 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1601 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1602 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1603 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001604 return;
Michael Chan51297242007-02-13 12:17:57 -08001605 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001606
Michael Chan60189dd2006-12-17 17:08:07 -08001607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan60189dd2006-12-17 17:08:07 -08001608 tg3_bmcr_reset(tp);
1609 val = tr32(GRC_MISC_CFG);
1610 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1611 udelay(40);
1612 return;
1613 } else {
Michael Chan715116a2006-09-27 16:09:25 -07001614 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1615 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1616 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1617 }
Michael Chan3f7045c2006-09-27 16:02:29 -07001618
Michael Chan15c3b692006-03-22 01:06:52 -08001619 /* On some chips the PHY must not be powered down because of
 1620 * hardware bugs.
1621 */
1622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1624 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1625 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1626 return;
Matt Carlsonce057f02007-11-12 21:08:03 -08001627
Matt Carlsonb5af7122007-11-12 21:22:02 -08001628 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
Matt Carlsonce057f02007-11-12 21:08:03 -08001629 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1630 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1631 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1632 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1633 }
1634
Michael Chan15c3b692006-03-22 01:06:52 -08001635 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1636}
1637
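/* Move the device to the requested PCI power state.  On the way down
 * this saves the current link configuration, reprograms copper PHYs
 * for a low-power link, arms the WOL mailbox and magic-packet logic
 * when WOL is configured, gates RX/TX clocks where the chip supports
 * it, and hands auxiliary power over via tg3_frob_aux_power() before
 * the final PCI_PM_CTRL write.
 */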
Michael Chanbc1c7562006-03-20 17:48:03 -08001638static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639{
1640 u32 misc_host_ctrl;
1641 u16 power_control, power_caps;
1642 int pm = tp->pm_cap;
1643
1644 /* Make sure register accesses (indirect or otherwise)
1645 * will function correctly.
1646 */
1647 pci_write_config_dword(tp->pdev,
1648 TG3PCI_MISC_HOST_CTRL,
1649 tp->misc_host_ctrl);
1650
1651 pci_read_config_word(tp->pdev,
1652 pm + PCI_PM_CTRL,
1653 &power_control);
1654 power_control |= PCI_PM_CTRL_PME_STATUS;
1655 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1656 switch (state) {
Michael Chanbc1c7562006-03-20 17:48:03 -08001657 case PCI_D0:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 power_control |= 0;
1659 pci_write_config_word(tp->pdev,
1660 pm + PCI_PM_CTRL,
1661 power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001662 udelay(100); /* Delay after power state change */
1663
Michael Chan9d26e212006-12-07 00:21:14 -08001664 /* Switch out of Vaux if it is a NIC */
1665 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
Michael Chanb401e9e2005-12-19 16:27:04 -08001666 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667
1668 return 0;
1669
Michael Chanbc1c7562006-03-20 17:48:03 -08001670 case PCI_D1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 power_control |= 1;
1672 break;
1673
Michael Chanbc1c7562006-03-20 17:48:03 -08001674 case PCI_D2:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 power_control |= 2;
1676 break;
1677
Michael Chanbc1c7562006-03-20 17:48:03 -08001678 case PCI_D3hot:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 power_control |= 3;
1680 break;
1681
1682 default:
1683 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1684 "requested.\n",
1685 tp->dev->name, state);
1686 return -EINVAL;
 1687 }
1688
1689 power_control |= PCI_PM_CTRL_PME_ENABLE;
1690
1691 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1692 tw32(TG3PCI_MISC_HOST_CTRL,
1693 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1694
1695 if (tp->link_config.phy_is_low_power == 0) {
1696 tp->link_config.phy_is_low_power = 1;
1697 tp->link_config.orig_speed = tp->link_config.speed;
1698 tp->link_config.orig_duplex = tp->link_config.duplex;
1699 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1700 }
1701
Michael Chan747e8f82005-07-25 12:33:22 -07001702 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 tp->link_config.speed = SPEED_10;
1704 tp->link_config.duplex = DUPLEX_HALF;
1705 tp->link_config.autoneg = AUTONEG_ENABLE;
1706 tg3_setup_phy(tp, 0);
1707 }
1708
Michael Chanb5d37722006-09-27 16:06:21 -07001709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1710 u32 val;
1711
1712 val = tr32(GRC_VCPU_EXT_CTRL);
1713 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1714 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan6921d202005-12-13 21:15:53 -08001715 int i;
1716 u32 val;
1717
1718 for (i = 0; i < 200; i++) {
1719 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1720 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1721 break;
1722 msleep(1);
1723 }
1724 }
Gary Zambranoa85feb82007-05-05 11:52:19 -07001725 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1726 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1727 WOL_DRV_STATE_SHUTDOWN |
1728 WOL_DRV_WOL |
1729 WOL_SET_MAGIC_PKT);
Michael Chan6921d202005-12-13 21:15:53 -08001730
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1732
1733 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1734 u32 mac_mode;
1735
1736 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1737 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1738 udelay(40);
1739
Michael Chan3f7045c2006-09-27 16:02:29 -07001740 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1741 mac_mode = MAC_MODE_PORT_MODE_GMII;
1742 else
1743 mac_mode = MAC_MODE_PORT_MODE_MII;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07001745 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1746 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1747 ASIC_REV_5700) {
1748 u32 speed = (tp->tg3_flags &
1749 TG3_FLAG_WOL_SPEED_100MB) ?
1750 SPEED_100 : SPEED_10;
1751 if (tg3_5700_link_polarity(tp, speed))
1752 mac_mode |= MAC_MODE_LINK_POLARITY;
1753 else
1754 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1755 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 } else {
1757 mac_mode = MAC_MODE_PORT_MODE_TBI;
1758 }
1759
John W. Linvillecbf46852005-04-21 17:01:29 -07001760 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 tw32(MAC_LED_CTRL, tp->led_ctrl);
1762
1763 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1764 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1765 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1766
1767 tw32_f(MAC_MODE, mac_mode);
1768 udelay(100);
1769
1770 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1771 udelay(10);
1772 }
1773
1774 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1775 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1777 u32 base_val;
1778
1779 base_val = tp->pci_clock_ctrl;
1780 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1781 CLOCK_CTRL_TXCLK_DISABLE);
1782
Michael Chanb401e9e2005-12-19 16:27:04 -08001783 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1784 CLOCK_CTRL_PWRDOWN_PLL133, 40);
Michael Chand7b0a852007-02-13 12:17:38 -08001785 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
Matt Carlson795d01c2007-10-07 23:28:17 -07001786 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
Michael Chand7b0a852007-02-13 12:17:38 -08001787 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
Michael Chan4cf78e42005-07-25 12:29:19 -07001788 /* do nothing */
Michael Chan85e94ce2005-04-21 17:05:28 -07001789 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1791 u32 newbits1, newbits2;
1792
1793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1795 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1796 CLOCK_CTRL_TXCLK_DISABLE |
1797 CLOCK_CTRL_ALTCLK);
1798 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1799 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1800 newbits1 = CLOCK_CTRL_625_CORE;
1801 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1802 } else {
1803 newbits1 = CLOCK_CTRL_ALTCLK;
1804 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1805 }
1806
Michael Chanb401e9e2005-12-19 16:27:04 -08001807 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1808 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
Michael Chanb401e9e2005-12-19 16:27:04 -08001810 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1811 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812
1813 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1814 u32 newbits3;
1815
1816 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1818 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1819 CLOCK_CTRL_TXCLK_DISABLE |
1820 CLOCK_CTRL_44MHZ_CORE);
1821 } else {
1822 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1823 }
1824
Michael Chanb401e9e2005-12-19 16:27:04 -08001825 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1826 tp->pci_clock_ctrl | newbits3, 40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 }
1828 }
1829
Michael Chan6921d202005-12-13 21:15:53 -08001830 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -07001831 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1832 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Michael Chan3f7045c2006-09-27 16:02:29 -07001833 tg3_power_down_phy(tp);
Michael Chan6921d202005-12-13 21:15:53 -08001834
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 tg3_frob_aux_power(tp);
1836
1837 /* Workaround for unstable PLL clock */
1838 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1839 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1840 u32 val = tr32(0x7d00);
1841
1842 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1843 tw32(0x7d00, val);
Michael Chan6921d202005-12-13 21:15:53 -08001844 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chanec41c7d2006-01-17 02:40:55 -08001845 int err;
1846
1847 err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 tg3_halt_cpu(tp, RX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08001849 if (!err)
1850 tg3_nvram_unlock(tp);
Michael Chan6921d202005-12-13 21:15:53 -08001851 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 }
1853
Michael Chanbbadf502006-04-06 21:46:34 -07001854 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1855
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 /* Finally, set the new power state. */
1857 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
Michael Chan8c6bda12005-04-21 17:09:08 -07001858 udelay(100); /* Delay after power state change */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 return 0;
1861}
1862
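/* Decode the speed/duplex encoding of the MII_TG3_AUX_STAT register.
 * The 5906 has no gigabit encodings, so its default case falls back to
 * the separate AUX_STAT_100 and AUX_STAT_FULL bits.
 */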
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1864{
1865 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1866 case MII_TG3_AUX_STAT_10HALF:
1867 *speed = SPEED_10;
1868 *duplex = DUPLEX_HALF;
1869 break;
1870
1871 case MII_TG3_AUX_STAT_10FULL:
1872 *speed = SPEED_10;
1873 *duplex = DUPLEX_FULL;
1874 break;
1875
1876 case MII_TG3_AUX_STAT_100HALF:
1877 *speed = SPEED_100;
1878 *duplex = DUPLEX_HALF;
1879 break;
1880
1881 case MII_TG3_AUX_STAT_100FULL:
1882 *speed = SPEED_100;
1883 *duplex = DUPLEX_FULL;
1884 break;
1885
1886 case MII_TG3_AUX_STAT_1000HALF:
1887 *speed = SPEED_1000;
1888 *duplex = DUPLEX_HALF;
1889 break;
1890
1891 case MII_TG3_AUX_STAT_1000FULL:
1892 *speed = SPEED_1000;
1893 *duplex = DUPLEX_FULL;
1894 break;
1895
1896 default:
Michael Chan715116a2006-09-27 16:09:25 -07001897 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1898 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1899 SPEED_10;
1900 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1901 DUPLEX_HALF;
1902 break;
1903 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 *speed = SPEED_INVALID;
1905 *duplex = DUPLEX_INVALID;
1906 break;
 1907 }
1908}
1909
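/* Program the copper PHY advertisement registers and (re)start
 * autonegotiation.  Three cases are handled: entering low-power mode
 * (10/100 advertisement only), normal autoneg driven by
 * link_config.advertising, and a forced speed/duplex where BMCR is
 * written directly after bouncing the link through loopback.
 */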
1910static void tg3_phy_copper_begin(struct tg3 *tp)
1911{
1912 u32 new_adv;
1913 int i;
1914
1915 if (tp->link_config.phy_is_low_power) {
1916 /* Entering low power mode. Disable gigabit and
1917 * 100baseT advertisements.
1918 */
1919 tg3_writephy(tp, MII_TG3_CTRL, 0);
1920
1921 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1922 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1923 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1924 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1925
1926 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1927 } else if (tp->link_config.speed == SPEED_INVALID) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1929 tp->link_config.advertising &=
1930 ~(ADVERTISED_1000baseT_Half |
1931 ADVERTISED_1000baseT_Full);
1932
Matt Carlsonba4d07a2007-12-20 20:08:00 -08001933 new_adv = ADVERTISE_CSMA;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1935 new_adv |= ADVERTISE_10HALF;
1936 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1937 new_adv |= ADVERTISE_10FULL;
1938 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1939 new_adv |= ADVERTISE_100HALF;
1940 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1941 new_adv |= ADVERTISE_100FULL;
Matt Carlsonba4d07a2007-12-20 20:08:00 -08001942
1943 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1944
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1946
1947 if (tp->link_config.advertising &
1948 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1949 new_adv = 0;
1950 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1951 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1952 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1953 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1954 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1955 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1956 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1957 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1958 MII_TG3_CTRL_ENABLE_AS_MASTER);
1959 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1960 } else {
1961 tg3_writephy(tp, MII_TG3_CTRL, 0);
1962 }
1963 } else {
Matt Carlsonba4d07a2007-12-20 20:08:00 -08001964 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1965 new_adv |= ADVERTISE_CSMA;
1966
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 /* Asking for a specific link mode. */
1968 if (tp->link_config.speed == SPEED_1000) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1970
1971 if (tp->link_config.duplex == DUPLEX_FULL)
1972 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1973 else
1974 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1975 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1976 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1977 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1978 MII_TG3_CTRL_ENABLE_AS_MASTER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 if (tp->link_config.speed == SPEED_100) {
1981 if (tp->link_config.duplex == DUPLEX_FULL)
1982 new_adv |= ADVERTISE_100FULL;
1983 else
1984 new_adv |= ADVERTISE_100HALF;
1985 } else {
1986 if (tp->link_config.duplex == DUPLEX_FULL)
1987 new_adv |= ADVERTISE_10FULL;
1988 else
1989 new_adv |= ADVERTISE_10HALF;
1990 }
1991 tg3_writephy(tp, MII_ADVERTISE, new_adv);
Matt Carlsonba4d07a2007-12-20 20:08:00 -08001992
1993 new_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 }
Matt Carlsonba4d07a2007-12-20 20:08:00 -08001995
1996 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 }
1998
1999 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2000 tp->link_config.speed != SPEED_INVALID) {
2001 u32 bmcr, orig_bmcr;
2002
2003 tp->link_config.active_speed = tp->link_config.speed;
2004 tp->link_config.active_duplex = tp->link_config.duplex;
2005
2006 bmcr = 0;
2007 switch (tp->link_config.speed) {
2008 default:
2009 case SPEED_10:
2010 break;
2011
2012 case SPEED_100:
2013 bmcr |= BMCR_SPEED100;
2014 break;
2015
2016 case SPEED_1000:
2017 bmcr |= TG3_BMCR_SPEED1000;
2018 break;
 2019 }
2020
2021 if (tp->link_config.duplex == DUPLEX_FULL)
2022 bmcr |= BMCR_FULLDPLX;
2023
2024 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2025 (bmcr != orig_bmcr)) {
2026 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2027 for (i = 0; i < 1500; i++) {
2028 u32 tmp;
2029
2030 udelay(10);
2031 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2032 tg3_readphy(tp, MII_BMSR, &tmp))
2033 continue;
2034 if (!(tmp & BMSR_LSTATUS)) {
2035 udelay(40);
2036 break;
2037 }
2038 }
2039 tg3_writephy(tp, MII_BMCR, bmcr);
2040 udelay(40);
2041 }
2042 } else {
2043 tg3_writephy(tp, MII_BMCR,
2044 BMCR_ANENABLE | BMCR_ANRESTART);
2045 }
2046}
2047
2048static int tg3_init_5401phy_dsp(struct tg3 *tp)
2049{
2050 int err;
2051
 2052 /* Turn off tap power management and set the
 2053 * extended packet length bit. */
2054 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2055
2056 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2057 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2058
2059 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2060 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2061
2062 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2063 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2064
2065 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2066 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2067
2068 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2069 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2070
2071 udelay(40);
2072
2073 return err;
2074}
2075
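/* Return 1 if the PHY advertisement registers already contain every
 * mode requested in @mask (10/100 in MII_ADVERTISE, gigabit in
 * MII_TG3_CTRL), 0 otherwise or when a register read fails.
 */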
Michael Chan3600d912006-12-07 00:21:48 -08002076static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077{
Michael Chan3600d912006-12-07 00:21:48 -08002078 u32 adv_reg, all_mask = 0;
2079
2080 if (mask & ADVERTISED_10baseT_Half)
2081 all_mask |= ADVERTISE_10HALF;
2082 if (mask & ADVERTISED_10baseT_Full)
2083 all_mask |= ADVERTISE_10FULL;
2084 if (mask & ADVERTISED_100baseT_Half)
2085 all_mask |= ADVERTISE_100HALF;
2086 if (mask & ADVERTISED_100baseT_Full)
2087 all_mask |= ADVERTISE_100FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
2089 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2090 return 0;
2091
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 if ((adv_reg & all_mask) != all_mask)
2093 return 0;
2094 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2095 u32 tg3_ctrl;
2096
Michael Chan3600d912006-12-07 00:21:48 -08002097 all_mask = 0;
2098 if (mask & ADVERTISED_1000baseT_Half)
2099 all_mask |= ADVERTISE_1000HALF;
2100 if (mask & ADVERTISED_1000baseT_Full)
2101 all_mask |= ADVERTISE_1000FULL;
2102
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2104 return 0;
2105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 if ((tg3_ctrl & all_mask) != all_mask)
2107 return 0;
2108 }
2109 return 1;
2110}
2111
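/* Verify that the pause bits currently advertised by the PHY match
 * what link_config.flowctrl requests.  On a full-duplex link a
 * mismatch fails the check; on half-duplex the advertisement is
 * rewritten quietly so a later renegotiation picks up the correct
 * pause settings.
 */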
Matt Carlsonef167e22007-12-20 20:10:01 -08002112static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2113{
2114 u32 curadv, reqadv;
2115
2116 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2117 return 1;
2118
2119 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2120 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2121
2122 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2123 if (curadv != reqadv)
2124 return 0;
2125
2126 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2127 tg3_readphy(tp, MII_LPA, rmtadv);
2128 } else {
2129 /* Reprogram the advertisement register, even if it
2130 * does not affect the current link. If the link
2131 * gets renegotiated in the future, we can save an
2132 * additional renegotiation cycle by advertising
2133 * it correctly in the first place.
2134 */
2135 if (curadv != reqadv) {
2136 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2137 ADVERTISE_PAUSE_ASYM);
2138 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2139 }
2140 }
2141
2142 return 1;
2143}
2144
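/* Bring up (or re-check) the link on a copper PHY: clear MAC events
 * and stale PHY interrupts, reset third-party PHYs that misbehave when
 * the link drops, poll BMSR/AUX_STAT for link, speed and duplex,
 * validate the negotiated flow control, then reprogram MAC_MODE and
 * report any carrier change.
 */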
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2146{
2147 int current_link_up;
2148 u32 bmsr, dummy;
Matt Carlsonef167e22007-12-20 20:10:01 -08002149 u32 lcl_adv, rmt_adv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 u16 current_speed;
2151 u8 current_duplex;
2152 int i, err;
2153
2154 tw32(MAC_EVENT, 0);
2155
2156 tw32_f(MAC_STATUS,
2157 (MAC_STATUS_SYNC_CHANGED |
2158 MAC_STATUS_CFG_CHANGED |
2159 MAC_STATUS_MI_COMPLETION |
2160 MAC_STATUS_LNKSTATE_CHANGED));
2161 udelay(40);
2162
Matt Carlson8ef21422008-05-02 16:47:53 -07002163 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2164 tw32_f(MAC_MI_MODE,
2165 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2166 udelay(80);
2167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
2169 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2170
2171 /* Some third-party PHYs need to be reset on link going
2172 * down.
2173 */
2174 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2177 netif_carrier_ok(tp->dev)) {
2178 tg3_readphy(tp, MII_BMSR, &bmsr);
2179 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2180 !(bmsr & BMSR_LSTATUS))
2181 force_reset = 1;
2182 }
2183 if (force_reset)
2184 tg3_phy_reset(tp);
2185
2186 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2187 tg3_readphy(tp, MII_BMSR, &bmsr);
2188 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2189 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2190 bmsr = 0;
2191
2192 if (!(bmsr & BMSR_LSTATUS)) {
2193 err = tg3_init_5401phy_dsp(tp);
2194 if (err)
2195 return err;
2196
2197 tg3_readphy(tp, MII_BMSR, &bmsr);
2198 for (i = 0; i < 1000; i++) {
2199 udelay(10);
2200 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2201 (bmsr & BMSR_LSTATUS)) {
2202 udelay(40);
2203 break;
2204 }
2205 }
2206
2207 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2208 !(bmsr & BMSR_LSTATUS) &&
2209 tp->link_config.active_speed == SPEED_1000) {
2210 err = tg3_phy_reset(tp);
2211 if (!err)
2212 err = tg3_init_5401phy_dsp(tp);
2213 if (err)
2214 return err;
2215 }
2216 }
2217 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2218 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2219 /* 5701 {A0,B0} CRC bug workaround */
2220 tg3_writephy(tp, 0x15, 0x0a75);
2221 tg3_writephy(tp, 0x1c, 0x8c68);
2222 tg3_writephy(tp, 0x1c, 0x8d68);
2223 tg3_writephy(tp, 0x1c, 0x8c68);
2224 }
2225
2226 /* Clear pending interrupts... */
2227 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2228 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2229
2230 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2231 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
Michael Chan715116a2006-09-27 16:09:25 -07002232 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2234
2235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2236 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2237 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2238 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2239 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2240 else
2241 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2242 }
2243
2244 current_link_up = 0;
2245 current_speed = SPEED_INVALID;
2246 current_duplex = DUPLEX_INVALID;
2247
2248 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2249 u32 val;
2250
2251 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2252 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2253 if (!(val & (1 << 10))) {
2254 val |= (1 << 10);
2255 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2256 goto relink;
2257 }
2258 }
2259
2260 bmsr = 0;
2261 for (i = 0; i < 100; i++) {
2262 tg3_readphy(tp, MII_BMSR, &bmsr);
2263 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2264 (bmsr & BMSR_LSTATUS))
2265 break;
2266 udelay(40);
2267 }
2268
2269 if (bmsr & BMSR_LSTATUS) {
2270 u32 aux_stat, bmcr;
2271
2272 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2273 for (i = 0; i < 2000; i++) {
2274 udelay(10);
2275 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2276 aux_stat)
2277 break;
2278 }
2279
2280 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2281 &current_speed,
2282 &current_duplex);
2283
2284 bmcr = 0;
2285 for (i = 0; i < 200; i++) {
2286 tg3_readphy(tp, MII_BMCR, &bmcr);
2287 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2288 continue;
2289 if (bmcr && bmcr != 0x7fff)
2290 break;
2291 udelay(10);
2292 }
2293
Matt Carlsonef167e22007-12-20 20:10:01 -08002294 lcl_adv = 0;
2295 rmt_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
Matt Carlsonef167e22007-12-20 20:10:01 -08002297 tp->link_config.active_speed = current_speed;
2298 tp->link_config.active_duplex = current_duplex;
2299
2300 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2301 if ((bmcr & BMCR_ANENABLE) &&
2302 tg3_copper_is_advertising_all(tp,
2303 tp->link_config.advertising)) {
2304 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2305 &rmt_adv))
2306 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 }
2308 } else {
2309 if (!(bmcr & BMCR_ANENABLE) &&
2310 tp->link_config.speed == current_speed &&
Matt Carlsonef167e22007-12-20 20:10:01 -08002311 tp->link_config.duplex == current_duplex &&
2312 tp->link_config.flowctrl ==
2313 tp->link_config.active_flowctrl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 }
2316 }
2317
Matt Carlsonef167e22007-12-20 20:10:01 -08002318 if (current_link_up == 1 &&
2319 tp->link_config.active_duplex == DUPLEX_FULL)
2320 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 }
2322
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323relink:
Michael Chan6921d202005-12-13 21:15:53 -08002324 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 u32 tmp;
2326
2327 tg3_phy_copper_begin(tp);
2328
2329 tg3_readphy(tp, MII_BMSR, &tmp);
2330 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2331 (tmp & BMSR_LSTATUS))
2332 current_link_up = 1;
2333 }
2334
2335 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2336 if (current_link_up == 1) {
2337 if (tp->link_config.active_speed == SPEED_100 ||
2338 tp->link_config.active_speed == SPEED_10)
2339 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2340 else
2341 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2342 } else
2343 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2344
2345 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2346 if (tp->link_config.active_duplex == DUPLEX_HALF)
2347 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2348
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002350 if (current_link_up == 1 &&
2351 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07002353 else
2354 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 }
2356
2357 /* ??? Without this setting Netgear GA302T PHY does not
2358 * ??? send/receive packets...
2359 */
2360 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2361 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2362 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2363 tw32_f(MAC_MI_MODE, tp->mi_mode);
2364 udelay(80);
2365 }
2366
2367 tw32_f(MAC_MODE, tp->mac_mode);
2368 udelay(40);
2369
2370 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2371 /* Polled via timer. */
2372 tw32_f(MAC_EVENT, 0);
2373 } else {
2374 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2375 }
2376 udelay(40);
2377
2378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2379 current_link_up == 1 &&
2380 tp->link_config.active_speed == SPEED_1000 &&
2381 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2382 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2383 udelay(120);
2384 tw32_f(MAC_STATUS,
2385 (MAC_STATUS_SYNC_CHANGED |
2386 MAC_STATUS_CFG_CHANGED));
2387 udelay(40);
2388 tg3_write_mem(tp,
2389 NIC_SRAM_FIRMWARE_MBOX,
2390 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2391 }
2392
2393 if (current_link_up != netif_carrier_ok(tp->dev)) {
2394 if (current_link_up)
2395 netif_carrier_on(tp->dev);
2396 else
2397 netif_carrier_off(tp->dev);
2398 tg3_link_report(tp);
2399 }
2400
2401 return 0;
2402}
2403
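/* Software autonegotiation state for fiber (TBI) links.  The states
 * and MR_* flags below track the 1000BASE-X autoneg exchange (ability
 * detect, ack detect, complete ack, idle detect), advanced one tick at
 * a time by tg3_fiber_aneg_smachine().
 */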
2404struct tg3_fiber_aneginfo {
2405 int state;
2406#define ANEG_STATE_UNKNOWN 0
2407#define ANEG_STATE_AN_ENABLE 1
2408#define ANEG_STATE_RESTART_INIT 2
2409#define ANEG_STATE_RESTART 3
2410#define ANEG_STATE_DISABLE_LINK_OK 4
2411#define ANEG_STATE_ABILITY_DETECT_INIT 5
2412#define ANEG_STATE_ABILITY_DETECT 6
2413#define ANEG_STATE_ACK_DETECT_INIT 7
2414#define ANEG_STATE_ACK_DETECT 8
2415#define ANEG_STATE_COMPLETE_ACK_INIT 9
2416#define ANEG_STATE_COMPLETE_ACK 10
2417#define ANEG_STATE_IDLE_DETECT_INIT 11
2418#define ANEG_STATE_IDLE_DETECT 12
2419#define ANEG_STATE_LINK_OK 13
2420#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2421#define ANEG_STATE_NEXT_PAGE_WAIT 15
2422
2423 u32 flags;
2424#define MR_AN_ENABLE 0x00000001
2425#define MR_RESTART_AN 0x00000002
2426#define MR_AN_COMPLETE 0x00000004
2427#define MR_PAGE_RX 0x00000008
2428#define MR_NP_LOADED 0x00000010
2429#define MR_TOGGLE_TX 0x00000020
2430#define MR_LP_ADV_FULL_DUPLEX 0x00000040
2431#define MR_LP_ADV_HALF_DUPLEX 0x00000080
2432#define MR_LP_ADV_SYM_PAUSE 0x00000100
2433#define MR_LP_ADV_ASYM_PAUSE 0x00000200
2434#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2435#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2436#define MR_LP_ADV_NEXT_PAGE 0x00001000
2437#define MR_TOGGLE_RX 0x00002000
2438#define MR_NP_RX 0x00004000
2439
2440#define MR_LINK_OK 0x80000000
2441
2442 unsigned long link_time, cur_time;
2443
2444 u32 ability_match_cfg;
2445 int ability_match_count;
2446
2447 char ability_match, idle_match, ack_match;
2448
2449 u32 txconfig, rxconfig;
2450#define ANEG_CFG_NP 0x00000080
2451#define ANEG_CFG_ACK 0x00000040
2452#define ANEG_CFG_RF2 0x00000020
2453#define ANEG_CFG_RF1 0x00000010
2454#define ANEG_CFG_PS2 0x00000001
2455#define ANEG_CFG_PS1 0x00008000
2456#define ANEG_CFG_HD 0x00004000
2457#define ANEG_CFG_FD 0x00002000
2458#define ANEG_CFG_INVAL 0x00001f06
2459
2460};
2461#define ANEG_OK 0
2462#define ANEG_DONE 1
2463#define ANEG_TIMER_ENAB 2
2464#define ANEG_FAILED -1
2465
2466#define ANEG_STATE_SETTLE_TIME 10000
2467
2468static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2469 struct tg3_fiber_aneginfo *ap)
2470{
Matt Carlson5be73b42007-12-20 20:09:29 -08002471 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 unsigned long delta;
2473 u32 rx_cfg_reg;
2474 int ret;
2475
2476 if (ap->state == ANEG_STATE_UNKNOWN) {
2477 ap->rxconfig = 0;
2478 ap->link_time = 0;
2479 ap->cur_time = 0;
2480 ap->ability_match_cfg = 0;
2481 ap->ability_match_count = 0;
2482 ap->ability_match = 0;
2483 ap->idle_match = 0;
2484 ap->ack_match = 0;
2485 }
2486 ap->cur_time++;
2487
2488 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2489 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2490
2491 if (rx_cfg_reg != ap->ability_match_cfg) {
2492 ap->ability_match_cfg = rx_cfg_reg;
2493 ap->ability_match = 0;
2494 ap->ability_match_count = 0;
2495 } else {
2496 if (++ap->ability_match_count > 1) {
2497 ap->ability_match = 1;
2498 ap->ability_match_cfg = rx_cfg_reg;
2499 }
2500 }
2501 if (rx_cfg_reg & ANEG_CFG_ACK)
2502 ap->ack_match = 1;
2503 else
2504 ap->ack_match = 0;
2505
2506 ap->idle_match = 0;
2507 } else {
2508 ap->idle_match = 1;
2509 ap->ability_match_cfg = 0;
2510 ap->ability_match_count = 0;
2511 ap->ability_match = 0;
2512 ap->ack_match = 0;
2513
2514 rx_cfg_reg = 0;
2515 }
2516
2517 ap->rxconfig = rx_cfg_reg;
2518 ret = ANEG_OK;
2519
 2520 switch (ap->state) {
2521 case ANEG_STATE_UNKNOWN:
2522 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2523 ap->state = ANEG_STATE_AN_ENABLE;
2524
2525 /* fallthru */
2526 case ANEG_STATE_AN_ENABLE:
2527 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2528 if (ap->flags & MR_AN_ENABLE) {
2529 ap->link_time = 0;
2530 ap->cur_time = 0;
2531 ap->ability_match_cfg = 0;
2532 ap->ability_match_count = 0;
2533 ap->ability_match = 0;
2534 ap->idle_match = 0;
2535 ap->ack_match = 0;
2536
2537 ap->state = ANEG_STATE_RESTART_INIT;
2538 } else {
2539 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2540 }
2541 break;
2542
2543 case ANEG_STATE_RESTART_INIT:
2544 ap->link_time = ap->cur_time;
2545 ap->flags &= ~(MR_NP_LOADED);
2546 ap->txconfig = 0;
2547 tw32(MAC_TX_AUTO_NEG, 0);
2548 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2549 tw32_f(MAC_MODE, tp->mac_mode);
2550 udelay(40);
2551
2552 ret = ANEG_TIMER_ENAB;
2553 ap->state = ANEG_STATE_RESTART;
2554
2555 /* fallthru */
2556 case ANEG_STATE_RESTART:
2557 delta = ap->cur_time - ap->link_time;
2558 if (delta > ANEG_STATE_SETTLE_TIME) {
2559 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2560 } else {
2561 ret = ANEG_TIMER_ENAB;
2562 }
2563 break;
2564
2565 case ANEG_STATE_DISABLE_LINK_OK:
2566 ret = ANEG_DONE;
2567 break;
2568
2569 case ANEG_STATE_ABILITY_DETECT_INIT:
2570 ap->flags &= ~(MR_TOGGLE_TX);
Matt Carlson5be73b42007-12-20 20:09:29 -08002571 ap->txconfig = ANEG_CFG_FD;
2572 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2573 if (flowctrl & ADVERTISE_1000XPAUSE)
2574 ap->txconfig |= ANEG_CFG_PS1;
2575 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2576 ap->txconfig |= ANEG_CFG_PS2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2578 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2579 tw32_f(MAC_MODE, tp->mac_mode);
2580 udelay(40);
2581
2582 ap->state = ANEG_STATE_ABILITY_DETECT;
2583 break;
2584
2585 case ANEG_STATE_ABILITY_DETECT:
2586 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2587 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2588 }
2589 break;
2590
2591 case ANEG_STATE_ACK_DETECT_INIT:
2592 ap->txconfig |= ANEG_CFG_ACK;
2593 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2594 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2595 tw32_f(MAC_MODE, tp->mac_mode);
2596 udelay(40);
2597
2598 ap->state = ANEG_STATE_ACK_DETECT;
2599
2600 /* fallthru */
2601 case ANEG_STATE_ACK_DETECT:
2602 if (ap->ack_match != 0) {
2603 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2604 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2605 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2606 } else {
2607 ap->state = ANEG_STATE_AN_ENABLE;
2608 }
2609 } else if (ap->ability_match != 0 &&
2610 ap->rxconfig == 0) {
2611 ap->state = ANEG_STATE_AN_ENABLE;
2612 }
2613 break;
2614
2615 case ANEG_STATE_COMPLETE_ACK_INIT:
2616 if (ap->rxconfig & ANEG_CFG_INVAL) {
2617 ret = ANEG_FAILED;
2618 break;
2619 }
2620 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2621 MR_LP_ADV_HALF_DUPLEX |
2622 MR_LP_ADV_SYM_PAUSE |
2623 MR_LP_ADV_ASYM_PAUSE |
2624 MR_LP_ADV_REMOTE_FAULT1 |
2625 MR_LP_ADV_REMOTE_FAULT2 |
2626 MR_LP_ADV_NEXT_PAGE |
2627 MR_TOGGLE_RX |
2628 MR_NP_RX);
2629 if (ap->rxconfig & ANEG_CFG_FD)
2630 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2631 if (ap->rxconfig & ANEG_CFG_HD)
2632 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2633 if (ap->rxconfig & ANEG_CFG_PS1)
2634 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2635 if (ap->rxconfig & ANEG_CFG_PS2)
2636 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2637 if (ap->rxconfig & ANEG_CFG_RF1)
2638 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2639 if (ap->rxconfig & ANEG_CFG_RF2)
2640 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2641 if (ap->rxconfig & ANEG_CFG_NP)
2642 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2643
2644 ap->link_time = ap->cur_time;
2645
2646 ap->flags ^= (MR_TOGGLE_TX);
2647 if (ap->rxconfig & 0x0008)
2648 ap->flags |= MR_TOGGLE_RX;
2649 if (ap->rxconfig & ANEG_CFG_NP)
2650 ap->flags |= MR_NP_RX;
2651 ap->flags |= MR_PAGE_RX;
2652
2653 ap->state = ANEG_STATE_COMPLETE_ACK;
2654 ret = ANEG_TIMER_ENAB;
2655 break;
2656
2657 case ANEG_STATE_COMPLETE_ACK:
2658 if (ap->ability_match != 0 &&
2659 ap->rxconfig == 0) {
2660 ap->state = ANEG_STATE_AN_ENABLE;
2661 break;
2662 }
2663 delta = ap->cur_time - ap->link_time;
2664 if (delta > ANEG_STATE_SETTLE_TIME) {
2665 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2666 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2667 } else {
2668 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2669 !(ap->flags & MR_NP_RX)) {
2670 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2671 } else {
2672 ret = ANEG_FAILED;
2673 }
2674 }
2675 }
2676 break;
2677
2678 case ANEG_STATE_IDLE_DETECT_INIT:
2679 ap->link_time = ap->cur_time;
2680 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2681 tw32_f(MAC_MODE, tp->mac_mode);
2682 udelay(40);
2683
2684 ap->state = ANEG_STATE_IDLE_DETECT;
2685 ret = ANEG_TIMER_ENAB;
2686 break;
2687
2688 case ANEG_STATE_IDLE_DETECT:
2689 if (ap->ability_match != 0 &&
2690 ap->rxconfig == 0) {
2691 ap->state = ANEG_STATE_AN_ENABLE;
2692 break;
2693 }
2694 delta = ap->cur_time - ap->link_time;
2695 if (delta > ANEG_STATE_SETTLE_TIME) {
2696 /* XXX another gem from the Broadcom driver :( */
2697 ap->state = ANEG_STATE_LINK_OK;
2698 }
2699 break;
2700
2701 case ANEG_STATE_LINK_OK:
2702 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2703 ret = ANEG_DONE;
2704 break;
2705
2706 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2707 /* ??? unimplemented */
2708 break;
2709
2710 case ANEG_STATE_NEXT_PAGE_WAIT:
2711 /* ??? unimplemented */
2712 break;
2713
2714 default:
2715 ret = ANEG_FAILED;
2716 break;
 2717 }
2718
2719 return ret;
2720}
2721
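/* Run the fiber autoneg state machine to completion.  Each pass of the
 * loop takes roughly 1 usec (udelay(1)), so the 195000-tick bound gives
 * the link partner on the order of 200 ms.  *txflags returns the config
 * word we transmitted, *rxflags the resulting MR_* flags.
 */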
Matt Carlson5be73b42007-12-20 20:09:29 -08002722static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723{
2724 int res = 0;
2725 struct tg3_fiber_aneginfo aninfo;
2726 int status = ANEG_FAILED;
2727 unsigned int tick;
2728 u32 tmp;
2729
2730 tw32_f(MAC_TX_AUTO_NEG, 0);
2731
2732 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2733 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2734 udelay(40);
2735
2736 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2737 udelay(40);
2738
2739 memset(&aninfo, 0, sizeof(aninfo));
2740 aninfo.flags |= MR_AN_ENABLE;
2741 aninfo.state = ANEG_STATE_UNKNOWN;
2742 aninfo.cur_time = 0;
2743 tick = 0;
2744 while (++tick < 195000) {
2745 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2746 if (status == ANEG_DONE || status == ANEG_FAILED)
2747 break;
2748
2749 udelay(1);
2750 }
2751
2752 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2753 tw32_f(MAC_MODE, tp->mac_mode);
2754 udelay(40);
2755
Matt Carlson5be73b42007-12-20 20:09:29 -08002756 *txflags = aninfo.txconfig;
2757 *rxflags = aninfo.flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758
2759 if (status == ANEG_DONE &&
2760 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2761 MR_LP_ADV_FULL_DUPLEX)))
2762 res = 1;
2763
2764 return res;
2765}
2766
2767static void tg3_init_bcm8002(struct tg3 *tp)
2768{
2769 u32 mac_status = tr32(MAC_STATUS);
2770 int i;
2771
 2772 /* Reset when initializing for the first time or when we have a link. */
2773 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2774 !(mac_status & MAC_STATUS_PCS_SYNCED))
2775 return;
2776
2777 /* Set PLL lock range. */
2778 tg3_writephy(tp, 0x16, 0x8007);
2779
2780 /* SW reset */
2781 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2782
2783 /* Wait for reset to complete. */
2784 /* XXX schedule_timeout() ... */
2785 for (i = 0; i < 500; i++)
2786 udelay(10);
2787
2788 /* Config mode; select PMA/Ch 1 regs. */
2789 tg3_writephy(tp, 0x10, 0x8411);
2790
2791 /* Enable auto-lock and comdet, select txclk for tx. */
2792 tg3_writephy(tp, 0x11, 0x0a10);
2793
2794 tg3_writephy(tp, 0x18, 0x00a0);
2795 tg3_writephy(tp, 0x16, 0x41ff);
2796
2797 /* Assert and deassert POR. */
2798 tg3_writephy(tp, 0x13, 0x0400);
2799 udelay(40);
2800 tg3_writephy(tp, 0x13, 0x0000);
2801
2802 tg3_writephy(tp, 0x11, 0x0a50);
2803 udelay(40);
2804 tg3_writephy(tp, 0x11, 0x0a10);
2805
2806 /* Wait for signal to stabilize */
2807 /* XXX schedule_timeout() ... */
2808 for (i = 0; i < 15000; i++)
2809 udelay(10);
2810
2811 /* Deselect the channel register so we can read the PHYID
2812 * later.
2813 */
2814 tg3_writephy(tp, 0x10, 0x8011);
2815}
2816
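/* Fiber link setup using the hardware SG_DIG autoneg block (5704S).
 * With autoneg disabled the block is bypassed; otherwise the expected
 * SG_DIG_CTRL value (including pause bits from link_config.flowctrl)
 * is programmed and SG_DIG_STATUS is checked, with a parallel-detection
 * fallback when the partner never sends config words.
 */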
2817static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2818{
Matt Carlson82cd3d12007-12-20 20:09:00 -08002819 u16 flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 u32 sg_dig_ctrl, sg_dig_status;
2821 u32 serdes_cfg, expected_sg_dig_ctrl;
2822 int workaround, port_a;
2823 int current_link_up;
2824
2825 serdes_cfg = 0;
2826 expected_sg_dig_ctrl = 0;
2827 workaround = 0;
2828 port_a = 1;
2829 current_link_up = 0;
2830
2831 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2832 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2833 workaround = 1;
2834 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2835 port_a = 0;
2836
2837 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2838 /* preserve bits 20-23 for voltage regulator */
2839 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2840 }
2841
2842 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2843
2844 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002845 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 if (workaround) {
2847 u32 val = serdes_cfg;
2848
2849 if (port_a)
2850 val |= 0xc010000;
2851 else
2852 val |= 0x4010000;
2853 tw32_f(MAC_SERDES_CFG, val);
2854 }
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002855
2856 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 }
2858 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2859 tg3_setup_flow_control(tp, 0, 0);
2860 current_link_up = 1;
2861 }
2862 goto out;
2863 }
2864
2865 /* Want auto-negotiation. */
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002866 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867
Matt Carlson82cd3d12007-12-20 20:09:00 -08002868 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2869 if (flowctrl & ADVERTISE_1000XPAUSE)
2870 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2871 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2872 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873
2874 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07002875 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2876 tp->serdes_counter &&
2877 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2878 MAC_STATUS_RCVD_CFG)) ==
2879 MAC_STATUS_PCS_SYNCED)) {
2880 tp->serdes_counter--;
2881 current_link_up = 1;
2882 goto out;
2883 }
2884restart_autoneg:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 if (workaround)
2886 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002887 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 udelay(5);
2889 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2890
Michael Chan3d3ebe72006-09-27 15:59:15 -07002891 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2892 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2894 MAC_STATUS_SIGNAL_DET)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07002895 sg_dig_status = tr32(SG_DIG_STATUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 mac_status = tr32(MAC_STATUS);
2897
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002898 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 (mac_status & MAC_STATUS_PCS_SYNCED)) {
Matt Carlson82cd3d12007-12-20 20:09:00 -08002900 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901
Matt Carlson82cd3d12007-12-20 20:09:00 -08002902 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2903 local_adv |= ADVERTISE_1000XPAUSE;
2904 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2905 local_adv |= ADVERTISE_1000XPSE_ASYM;
2906
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002907 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08002908 remote_adv |= LPA_1000XPAUSE;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002909 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
Matt Carlson82cd3d12007-12-20 20:09:00 -08002910 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911
2912 tg3_setup_flow_control(tp, local_adv, remote_adv);
2913 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07002914 tp->serdes_counter = 0;
2915 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002916 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07002917 if (tp->serdes_counter)
2918 tp->serdes_counter--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 else {
2920 if (workaround) {
2921 u32 val = serdes_cfg;
2922
2923 if (port_a)
2924 val |= 0xc010000;
2925 else
2926 val |= 0x4010000;
2927
2928 tw32_f(MAC_SERDES_CFG, val);
2929 }
2930
Matt Carlsonc98f6e32007-12-20 20:08:32 -08002931 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 udelay(40);
2933
 2934 /* Link parallel detection: the link is up only
 2935  * if we have PCS_SYNC and are not receiving
 2936  * config code words. */
2937 mac_status = tr32(MAC_STATUS);
2938 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2939 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2940 tg3_setup_flow_control(tp, 0, 0);
2941 current_link_up = 1;
Michael Chan3d3ebe72006-09-27 15:59:15 -07002942 tp->tg3_flags2 |=
2943 TG3_FLG2_PARALLEL_DETECT;
2944 tp->serdes_counter =
2945 SERDES_PARALLEL_DET_TIMEOUT;
2946 } else
2947 goto restart_autoneg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 }
2949 }
Michael Chan3d3ebe72006-09-27 15:59:15 -07002950 } else {
2951 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2952 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 }
2954
2955out:
2956 return current_link_up;
2957}
2958
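/* Fiber link setup without the SG_DIG block: run the software state
 * machine via fiber_autoneg() when autoneg is enabled, otherwise force
 * a 1000/full link, and derive the pause configuration from the
 * exchanged ANEG_CFG_* / MR_LP_ADV_* flags.
 */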
2959static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2960{
2961 int current_link_up = 0;
2962
Michael Chan5cf64b82007-05-05 12:11:21 -07002963 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965
2966 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
Matt Carlson5be73b42007-12-20 20:09:29 -08002967 u32 txflags, rxflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 int i;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002969
Matt Carlson5be73b42007-12-20 20:09:29 -08002970 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2971 u32 local_adv = 0, remote_adv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972
Matt Carlson5be73b42007-12-20 20:09:29 -08002973 if (txflags & ANEG_CFG_PS1)
2974 local_adv |= ADVERTISE_1000XPAUSE;
2975 if (txflags & ANEG_CFG_PS2)
2976 local_adv |= ADVERTISE_1000XPSE_ASYM;
2977
2978 if (rxflags & MR_LP_ADV_SYM_PAUSE)
2979 remote_adv |= LPA_1000XPAUSE;
2980 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2981 remote_adv |= LPA_1000XPAUSE_ASYM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982
2983 tg3_setup_flow_control(tp, local_adv, remote_adv);
2984
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 current_link_up = 1;
2986 }
2987 for (i = 0; i < 30; i++) {
2988 udelay(20);
2989 tw32_f(MAC_STATUS,
2990 (MAC_STATUS_SYNC_CHANGED |
2991 MAC_STATUS_CFG_CHANGED));
2992 udelay(40);
2993 if ((tr32(MAC_STATUS) &
2994 (MAC_STATUS_SYNC_CHANGED |
2995 MAC_STATUS_CFG_CHANGED)) == 0)
2996 break;
2997 }
2998
2999 mac_status = tr32(MAC_STATUS);
3000 if (current_link_up == 0 &&
3001 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3002 !(mac_status & MAC_STATUS_RCVD_CFG))
3003 current_link_up = 1;
3004 } else {
Matt Carlson5be73b42007-12-20 20:09:29 -08003005 tg3_setup_flow_control(tp, 0, 0);
3006
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 /* Forcing 1000FD link up. */
3008 current_link_up = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009
3010 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3011 udelay(40);
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07003012
3013 tw32_f(MAC_MODE, tp->mac_mode);
3014 udelay(40);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 }
3016
3017out:
3018 return current_link_up;
3019}
3020
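/* Top-level link setup for TBI fiber ports.  Chooses hardware (SG_DIG)
 * or hand-rolled autoneg based on TG3_FLG2_HW_AUTONEG, waits for the
 * MAC status to settle, then drives the link LED and carrier state,
 * reporting only when speed, duplex or flow control actually changed.
 */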
3021static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3022{
3023 u32 orig_pause_cfg;
3024 u16 orig_active_speed;
3025 u8 orig_active_duplex;
3026 u32 mac_status;
3027 int current_link_up;
3028 int i;
3029
Matt Carlson8d018622007-12-20 20:05:44 -08003030 orig_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 orig_active_speed = tp->link_config.active_speed;
3032 orig_active_duplex = tp->link_config.active_duplex;
3033
3034 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3035 netif_carrier_ok(tp->dev) &&
3036 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3037 mac_status = tr32(MAC_STATUS);
3038 mac_status &= (MAC_STATUS_PCS_SYNCED |
3039 MAC_STATUS_SIGNAL_DET |
3040 MAC_STATUS_CFG_CHANGED |
3041 MAC_STATUS_RCVD_CFG);
3042 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3043 MAC_STATUS_SIGNAL_DET)) {
3044 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3045 MAC_STATUS_CFG_CHANGED));
3046 return 0;
3047 }
3048 }
3049
3050 tw32_f(MAC_TX_AUTO_NEG, 0);
3051
3052 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3053 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3054 tw32_f(MAC_MODE, tp->mac_mode);
3055 udelay(40);
3056
3057 if (tp->phy_id == PHY_ID_BCM8002)
3058 tg3_init_bcm8002(tp);
3059
3060 /* Enable link change event even when serdes polling. */
3061 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3062 udelay(40);
3063
3064 current_link_up = 0;
3065 mac_status = tr32(MAC_STATUS);
3066
3067 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3068 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3069 else
3070 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3071
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072 tp->hw_status->status =
3073 (SD_STATUS_UPDATED |
3074 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3075
3076 for (i = 0; i < 100; i++) {
3077 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3078 MAC_STATUS_CFG_CHANGED));
3079 udelay(5);
3080 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
Michael Chan3d3ebe72006-09-27 15:59:15 -07003081 MAC_STATUS_CFG_CHANGED |
3082 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 break;
3084 }
3085
3086 mac_status = tr32(MAC_STATUS);
3087 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3088 current_link_up = 0;
Michael Chan3d3ebe72006-09-27 15:59:15 -07003089 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3090 tp->serdes_counter == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 tw32_f(MAC_MODE, (tp->mac_mode |
3092 MAC_MODE_SEND_CONFIGS));
3093 udelay(1);
3094 tw32_f(MAC_MODE, tp->mac_mode);
3095 }
3096 }
3097
3098 if (current_link_up == 1) {
3099 tp->link_config.active_speed = SPEED_1000;
3100 tp->link_config.active_duplex = DUPLEX_FULL;
3101 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3102 LED_CTRL_LNKLED_OVERRIDE |
3103 LED_CTRL_1000MBPS_ON));
3104 } else {
3105 tp->link_config.active_speed = SPEED_INVALID;
3106 tp->link_config.active_duplex = DUPLEX_INVALID;
3107 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3108 LED_CTRL_LNKLED_OVERRIDE |
3109 LED_CTRL_TRAFFIC_OVERRIDE));
3110 }
3111
3112 if (current_link_up != netif_carrier_ok(tp->dev)) {
3113 if (current_link_up)
3114 netif_carrier_on(tp->dev);
3115 else
3116 netif_carrier_off(tp->dev);
3117 tg3_link_report(tp);
3118 } else {
Matt Carlson8d018622007-12-20 20:05:44 -08003119 u32 now_pause_cfg = tp->link_config.active_flowctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 if (orig_pause_cfg != now_pause_cfg ||
3121 orig_active_speed != tp->link_config.active_speed ||
3122 orig_active_duplex != tp->link_config.active_duplex)
3123 tg3_link_report(tp);
3124 }
3125
3126 return 0;
3127}
3128
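/* Fiber link setup for MII-attached serdes (e.g. 5714S).  The
 * 1000BASE-X advertisement lives in the standard MII registers here,
 * so the flow mirrors the copper path: program MII_ADVERTISE/MII_BMCR,
 * read back BMSR/LPA, resolve duplex and flow control, then update
 * MAC_MODE and the carrier state.
 */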
Michael Chan747e8f82005-07-25 12:33:22 -07003129static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3130{
3131 int current_link_up, err = 0;
3132 u32 bmsr, bmcr;
3133 u16 current_speed;
3134 u8 current_duplex;
Matt Carlsonef167e22007-12-20 20:10:01 -08003135 u32 local_adv, remote_adv;
Michael Chan747e8f82005-07-25 12:33:22 -07003136
3137 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3138 tw32_f(MAC_MODE, tp->mac_mode);
3139 udelay(40);
3140
3141 tw32(MAC_EVENT, 0);
3142
3143 tw32_f(MAC_STATUS,
3144 (MAC_STATUS_SYNC_CHANGED |
3145 MAC_STATUS_CFG_CHANGED |
3146 MAC_STATUS_MI_COMPLETION |
3147 MAC_STATUS_LNKSTATE_CHANGED));
3148 udelay(40);
3149
3150 if (force_reset)
3151 tg3_phy_reset(tp);
3152
3153 current_link_up = 0;
3154 current_speed = SPEED_INVALID;
3155 current_duplex = DUPLEX_INVALID;
3156
3157 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3158 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3160 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3161 bmsr |= BMSR_LSTATUS;
3162 else
3163 bmsr &= ~BMSR_LSTATUS;
3164 }
Michael Chan747e8f82005-07-25 12:33:22 -07003165
3166 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3167
3168 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
Matt Carlsonef167e22007-12-20 20:10:01 -08003169 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3170 tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
Michael Chan747e8f82005-07-25 12:33:22 -07003171 /* do nothing, just check for link up at the end */
3172 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3173 u32 adv, new_adv;
3174
3175 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3176 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3177 ADVERTISE_1000XPAUSE |
3178 ADVERTISE_1000XPSE_ASYM |
3179 ADVERTISE_SLCT);
3180
Matt Carlsonba4d07a2007-12-20 20:08:00 -08003181 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
Michael Chan747e8f82005-07-25 12:33:22 -07003182
3183 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3184 new_adv |= ADVERTISE_1000XHALF;
3185 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3186 new_adv |= ADVERTISE_1000XFULL;
3187
3188 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3189 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3190 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3191 tg3_writephy(tp, MII_BMCR, bmcr);
3192
3193 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
Michael Chan3d3ebe72006-09-27 15:59:15 -07003194 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
Michael Chan747e8f82005-07-25 12:33:22 -07003195 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3196
3197 return err;
3198 }
3199 } else {
3200 u32 new_bmcr;
3201
3202 bmcr &= ~BMCR_SPEED1000;
3203 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3204
3205 if (tp->link_config.duplex == DUPLEX_FULL)
3206 new_bmcr |= BMCR_FULLDPLX;
3207
3208 if (new_bmcr != bmcr) {
3209 /* BMCR_SPEED1000 is a reserved bit that needs
3210 * to be set on write.
3211 */
3212 new_bmcr |= BMCR_SPEED1000;
3213
3214 /* Force a linkdown */
3215 if (netif_carrier_ok(tp->dev)) {
3216 u32 adv;
3217
3218 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3219 adv &= ~(ADVERTISE_1000XFULL |
3220 ADVERTISE_1000XHALF |
3221 ADVERTISE_SLCT);
3222 tg3_writephy(tp, MII_ADVERTISE, adv);
3223 tg3_writephy(tp, MII_BMCR, bmcr |
3224 BMCR_ANRESTART |
3225 BMCR_ANENABLE);
3226 udelay(10);
3227 netif_carrier_off(tp->dev);
3228 }
3229 tg3_writephy(tp, MII_BMCR, new_bmcr);
3230 bmcr = new_bmcr;
3231 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3232 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
Michael Chand4d2c552006-03-20 17:47:20 -08003233 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3234 ASIC_REV_5714) {
3235 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3236 bmsr |= BMSR_LSTATUS;
3237 else
3238 bmsr &= ~BMSR_LSTATUS;
3239 }
Michael Chan747e8f82005-07-25 12:33:22 -07003240 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3241 }
3242 }
3243
3244 if (bmsr & BMSR_LSTATUS) {
3245 current_speed = SPEED_1000;
3246 current_link_up = 1;
3247 if (bmcr & BMCR_FULLDPLX)
3248 current_duplex = DUPLEX_FULL;
3249 else
3250 current_duplex = DUPLEX_HALF;
3251
Matt Carlsonef167e22007-12-20 20:10:01 -08003252 local_adv = 0;
3253 remote_adv = 0;
3254
Michael Chan747e8f82005-07-25 12:33:22 -07003255 if (bmcr & BMCR_ANENABLE) {
Matt Carlsonef167e22007-12-20 20:10:01 -08003256 u32 common;
Michael Chan747e8f82005-07-25 12:33:22 -07003257
3258 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3259 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3260 common = local_adv & remote_adv;
3261 if (common & (ADVERTISE_1000XHALF |
3262 ADVERTISE_1000XFULL)) {
3263 if (common & ADVERTISE_1000XFULL)
3264 current_duplex = DUPLEX_FULL;
3265 else
3266 current_duplex = DUPLEX_HALF;
Michael Chan747e8f82005-07-25 12:33:22 -07003267 }
3268 else
3269 current_link_up = 0;
3270 }
3271 }
3272
Matt Carlsonef167e22007-12-20 20:10:01 -08003273 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3274 tg3_setup_flow_control(tp, local_adv, remote_adv);
3275
Michael Chan747e8f82005-07-25 12:33:22 -07003276 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3277 if (tp->link_config.active_duplex == DUPLEX_HALF)
3278 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3279
3280 tw32_f(MAC_MODE, tp->mac_mode);
3281 udelay(40);
3282
3283 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3284
3285 tp->link_config.active_speed = current_speed;
3286 tp->link_config.active_duplex = current_duplex;
3287
3288 if (current_link_up != netif_carrier_ok(tp->dev)) {
3289 if (current_link_up)
3290 netif_carrier_on(tp->dev);
3291 else {
3292 netif_carrier_off(tp->dev);
3293 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3294 }
3295 tg3_link_report(tp);
3296 }
3297 return err;
3298}
3299
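/* Parallel-detection helper for MII serdes links.  Once the autoneg
 * grace period (tp->serdes_counter) has expired: if signal is present
 * but no config code words are being received, force the link up via
 * parallel detection; once code words reappear, hand the link back to
 * autoneg.
 */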
3300static void tg3_serdes_parallel_detect(struct tg3 *tp)
3301{
Michael Chan3d3ebe72006-09-27 15:59:15 -07003302 if (tp->serdes_counter) {
Michael Chan747e8f82005-07-25 12:33:22 -07003303 /* Give autoneg time to complete. */
Michael Chan3d3ebe72006-09-27 15:59:15 -07003304 tp->serdes_counter--;
Michael Chan747e8f82005-07-25 12:33:22 -07003305 return;
3306 }
3307 if (!netif_carrier_ok(tp->dev) &&
3308 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3309 u32 bmcr;
3310
3311 tg3_readphy(tp, MII_BMCR, &bmcr);
3312 if (bmcr & BMCR_ANENABLE) {
3313 u32 phy1, phy2;
3314
3315 /* Select shadow register 0x1f */
3316 tg3_writephy(tp, 0x1c, 0x7c00);
3317 tg3_readphy(tp, 0x1c, &phy1);
3318
3319 /* Select expansion interrupt status register */
3320 tg3_writephy(tp, 0x17, 0x0f01);
3321 tg3_readphy(tp, 0x15, &phy2);
3322 tg3_readphy(tp, 0x15, &phy2);
3323
3324 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3325				/* We have signal detect and are not receiving
3326				 * config code words, so the link is up by parallel
3327 * detection.
3328 */
3329
3330 bmcr &= ~BMCR_ANENABLE;
3331 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3332 tg3_writephy(tp, MII_BMCR, bmcr);
3333 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3334 }
3335 }
3336 }
3337 else if (netif_carrier_ok(tp->dev) &&
3338 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3339 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3340 u32 phy2;
3341
3342 /* Select expansion interrupt status register */
3343 tg3_writephy(tp, 0x17, 0x0f01);
3344 tg3_readphy(tp, 0x15, &phy2);
3345 if (phy2 & 0x20) {
3346 u32 bmcr;
3347
3348 /* Config code words received, turn on autoneg. */
3349 tg3_readphy(tp, MII_BMCR, &bmcr);
3350 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3351
3352 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3353
3354 }
3355 }
3356}
3357
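/* Top-level link (re)configuration: dispatch to the fiber, MII-serdes or
 * copper handler, then adjust the clock prescaler (5784 A0/A1 only), the
 * MAC TX inter-packet gap/slot time, the statistics coalescing interval
 * and, on ASPM-workaround devices, the PCIe L1 entry threshold to match
 * the negotiated link state.
 */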
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3359{
3360 int err;
3361
3362 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3363 err = tg3_setup_fiber_phy(tp, force_reset);
Michael Chan747e8f82005-07-25 12:33:22 -07003364 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3365 err = tg3_setup_fiber_mii_phy(tp, force_reset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 } else {
3367 err = tg3_setup_copper_phy(tp, force_reset);
3368 }
3369
Matt Carlsonb5af7122007-11-12 21:22:02 -08003370 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3371 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
Matt Carlsonaa6c91f2007-11-12 21:18:04 -08003372 u32 val, scale;
3373
3374 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3375 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3376 scale = 65;
3377 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3378 scale = 6;
3379 else
3380 scale = 12;
3381
3382 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3383 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3384 tw32(GRC_MISC_CFG, val);
3385 }
3386
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 if (tp->link_config.active_speed == SPEED_1000 &&
3388 tp->link_config.active_duplex == DUPLEX_HALF)
3389 tw32(MAC_TX_LENGTHS,
3390 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3391 (6 << TX_LENGTHS_IPG_SHIFT) |
3392 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3393 else
3394 tw32(MAC_TX_LENGTHS,
3395 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3396 (6 << TX_LENGTHS_IPG_SHIFT) |
3397 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3398
3399 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3400 if (netif_carrier_ok(tp->dev)) {
3401 tw32(HOSTCC_STAT_COAL_TICKS,
David S. Miller15f98502005-05-18 22:49:26 -07003402 tp->coal.stats_block_coalesce_usecs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 } else {
3404 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3405 }
3406 }
3407
Matt Carlson8ed5d972007-05-07 00:25:49 -07003408 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3409 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3410 if (!netif_carrier_ok(tp->dev))
3411 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3412 tp->pwrmgmt_thresh;
3413 else
3414 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3415 tw32(PCIE_PWR_MGMT_THRESH, val);
3416 }
3417
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 return err;
3419}
3420
Michael Chandf3e6542006-05-26 17:48:07 -07003421/* This is called whenever we suspect that the system chipset is re-
3422 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3423 * is bogus tx completions. We try to recover by setting the
3424 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3425 * in the workqueue.
3426 */
3427static void tg3_tx_recover(struct tg3 *tp)
3428{
3429 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3430 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3431
3432 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3433 "mapped I/O cycles to the network device, attempting to "
3434 "recover. Please report the problem to the driver maintainer "
3435 "and include system chipset information.\n", tp->dev->name);
3436
3437 spin_lock(&tp->lock);
Michael Chandf3e6542006-05-26 17:48:07 -07003438 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
Michael Chandf3e6542006-05-26 17:48:07 -07003439 spin_unlock(&tp->lock);
3440}
3441
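/* Number of free TX descriptors.  The smp_mb() orders this read of
 * tx_cons against queue state changes; it pairs with the barrier in
 * tg3_tx() (see the comment there).
 */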
Michael Chan1b2a7202006-08-07 21:46:02 -07003442static inline u32 tg3_tx_avail(struct tg3 *tp)
3443{
3444 smp_mb();
3445 return (tp->tx_pending -
3446 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3447}
3448
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449/* Tigon3 never reports partial packet sends. So we do not
3450 * need special logic to handle SKBs that have not had all
3451 * of their frags sent yet, like SunGEM does.
3452 */
3453static void tg3_tx(struct tg3 *tp)
3454{
3455 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3456 u32 sw_idx = tp->tx_cons;
3457
3458 while (sw_idx != hw_idx) {
3459 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3460 struct sk_buff *skb = ri->skb;
Michael Chandf3e6542006-05-26 17:48:07 -07003461 int i, tx_bug = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462
Michael Chandf3e6542006-05-26 17:48:07 -07003463 if (unlikely(skb == NULL)) {
3464 tg3_tx_recover(tp);
3465 return;
3466 }
3467
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 pci_unmap_single(tp->pdev,
3469 pci_unmap_addr(ri, mapping),
3470 skb_headlen(skb),
3471 PCI_DMA_TODEVICE);
3472
3473 ri->skb = NULL;
3474
3475 sw_idx = NEXT_TX(sw_idx);
3476
3477 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478 ri = &tp->tx_buffers[sw_idx];
Michael Chandf3e6542006-05-26 17:48:07 -07003479 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3480 tx_bug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481
3482 pci_unmap_page(tp->pdev,
3483 pci_unmap_addr(ri, mapping),
3484 skb_shinfo(skb)->frags[i].size,
3485 PCI_DMA_TODEVICE);
3486
3487 sw_idx = NEXT_TX(sw_idx);
3488 }
3489
David S. Millerf47c11e2005-06-24 20:18:35 -07003490 dev_kfree_skb(skb);
Michael Chandf3e6542006-05-26 17:48:07 -07003491
3492 if (unlikely(tx_bug)) {
3493 tg3_tx_recover(tp);
3494 return;
3495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 }
3497
3498 tp->tx_cons = sw_idx;
3499
Michael Chan1b2a7202006-08-07 21:46:02 -07003500 /* Need to make the tx_cons update visible to tg3_start_xmit()
3501 * before checking for netif_queue_stopped(). Without the
3502 * memory barrier, there is a small possibility that tg3_start_xmit()
3503 * will miss it and cause the queue to be stopped forever.
3504 */
3505 smp_mb();
3506
3507 if (unlikely(netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003508 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
Michael Chan1b2a7202006-08-07 21:46:02 -07003509 netif_tx_lock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003510 if (netif_queue_stopped(tp->dev) &&
Ranjit Manomohan42952232006-10-18 20:54:26 -07003511 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
Michael Chan51b91462005-09-01 17:41:28 -07003512 netif_wake_queue(tp->dev);
Michael Chan1b2a7202006-08-07 21:46:02 -07003513 netif_tx_unlock(tp->dev);
Michael Chan51b91462005-09-01 17:41:28 -07003514 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515}
3516
3517/* Returns size of skb allocated or < 0 on error.
3518 *
3519 * We only need to fill in the address because the other members
3520 * of the RX descriptor are invariant, see tg3_init_rings.
3521 *
3522  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3523 * posting buffers we only dirty the first cache line of the RX
3524 * descriptor (containing the address). Whereas for the RX status
3525 * buffers the cpu only reads the last cacheline of the RX descriptor
3526 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3527 */
3528static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3529 int src_idx, u32 dest_idx_unmasked)
3530{
3531 struct tg3_rx_buffer_desc *desc;
3532 struct ring_info *map, *src_map;
3533 struct sk_buff *skb;
3534 dma_addr_t mapping;
3535 int skb_size, dest_idx;
3536
3537 src_map = NULL;
3538 switch (opaque_key) {
3539 case RXD_OPAQUE_RING_STD:
3540 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3541 desc = &tp->rx_std[dest_idx];
3542 map = &tp->rx_std_buffers[dest_idx];
3543 if (src_idx >= 0)
3544 src_map = &tp->rx_std_buffers[src_idx];
Michael Chan7e72aad2005-07-25 12:31:17 -07003545 skb_size = tp->rx_pkt_buf_sz;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 break;
3547
3548 case RXD_OPAQUE_RING_JUMBO:
3549 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3550 desc = &tp->rx_jumbo[dest_idx];
3551 map = &tp->rx_jumbo_buffers[dest_idx];
3552 if (src_idx >= 0)
3553 src_map = &tp->rx_jumbo_buffers[src_idx];
3554 skb_size = RX_JUMBO_PKT_BUF_SZ;
3555 break;
3556
3557 default:
3558 return -EINVAL;
3559	}
3560
3561 /* Do not overwrite any of the map or rp information
3562 * until we are sure we can commit to a new buffer.
3563 *
3564 * Callers depend upon this behavior and assume that
3565 * we leave everything unchanged if we fail.
3566 */
David S. Millera20e9c62006-07-31 22:38:16 -07003567 skb = netdev_alloc_skb(tp->dev, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 if (skb == NULL)
3569 return -ENOMEM;
3570
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571 skb_reserve(skb, tp->rx_offset);
3572
3573 mapping = pci_map_single(tp->pdev, skb->data,
3574 skb_size - tp->rx_offset,
3575 PCI_DMA_FROMDEVICE);
3576
3577 map->skb = skb;
3578 pci_unmap_addr_set(map, mapping, mapping);
3579
3580 if (src_map != NULL)
3581 src_map->skb = NULL;
3582
3583 desc->addr_hi = ((u64)mapping >> 32);
3584 desc->addr_lo = ((u64)mapping & 0xffffffff);
3585
3586 return skb_size;
3587}
3588
3589/* We only need to move the address over because the other
3590 * members of the RX descriptor are invariant. See notes above
3591 * tg3_alloc_rx_skb for full details.
3592 */
3593static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3594 int src_idx, u32 dest_idx_unmasked)
3595{
3596 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3597 struct ring_info *src_map, *dest_map;
3598 int dest_idx;
3599
3600 switch (opaque_key) {
3601 case RXD_OPAQUE_RING_STD:
3602 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3603 dest_desc = &tp->rx_std[dest_idx];
3604 dest_map = &tp->rx_std_buffers[dest_idx];
3605 src_desc = &tp->rx_std[src_idx];
3606 src_map = &tp->rx_std_buffers[src_idx];
3607 break;
3608
3609 case RXD_OPAQUE_RING_JUMBO:
3610 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3611 dest_desc = &tp->rx_jumbo[dest_idx];
3612 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3613 src_desc = &tp->rx_jumbo[src_idx];
3614 src_map = &tp->rx_jumbo_buffers[src_idx];
3615 break;
3616
3617 default:
3618 return;
3619	}
3620
3621 dest_map->skb = src_map->skb;
3622 pci_unmap_addr_set(dest_map, mapping,
3623 pci_unmap_addr(src_map, mapping));
3624 dest_desc->addr_hi = src_desc->addr_hi;
3625 dest_desc->addr_lo = src_desc->addr_lo;
3626
3627 src_map->skb = NULL;
3628}
3629
3630#if TG3_VLAN_TAG_USED
3631static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3632{
3633 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3634}
3635#endif
3636
3637/* The RX ring scheme is composed of multiple rings which post fresh
3638 * buffers to the chip, and one special ring the chip uses to report
3639 * status back to the host.
3640 *
3641 * The special ring reports the status of received packets to the
3642 * host. The chip does not write into the original descriptor the
3643 * RX buffer was obtained from. The chip simply takes the original
3644 * descriptor as provided by the host, updates the status and length
3645 * field, then writes this into the next status ring entry.
3646 *
3647 * Each ring the host uses to post buffers to the chip is described
3648 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3649 * it is first placed into the on-chip ram. When the packet's length
3650 * is known, it walks down the TG3_BDINFO entries to select the ring.
3651 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3652 * whose MAXLEN can accommodate the new packet's length is chosen.
3653 *
3654 * The "separate ring for rx status" scheme may sound queer, but it makes
3655 * sense from a cache coherency perspective. If only the host writes
3656 * to the buffer post rings, and only the chip writes to the rx status
3657 * rings, then cache lines never move beyond shared-modified state.
3658 * If both the host and chip were to write into the same ring, cache line
3659 * eviction could occur since both entities want it in an exclusive state.
3660 */
3661static int tg3_rx(struct tg3 *tp, int budget)
3662{
Michael Chanf92905d2006-06-29 20:14:29 -07003663 u32 work_mask, rx_std_posted = 0;
Michael Chan483ba502005-04-25 15:14:03 -07003664 u32 sw_idx = tp->rx_rcb_ptr;
3665 u16 hw_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 int received;
3667
3668 hw_idx = tp->hw_status->idx[0].rx_producer;
3669 /*
3670 * We need to order the read of hw_idx and the read of
3671 * the opaque cookie.
3672 */
3673 rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 work_mask = 0;
3675 received = 0;
3676 while (sw_idx != hw_idx && budget > 0) {
3677 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3678 unsigned int len;
3679 struct sk_buff *skb;
3680 dma_addr_t dma_addr;
3681 u32 opaque_key, desc_idx, *post_ptr;
3682
3683 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3684 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3685 if (opaque_key == RXD_OPAQUE_RING_STD) {
3686 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3687 mapping);
3688 skb = tp->rx_std_buffers[desc_idx].skb;
3689 post_ptr = &tp->rx_std_ptr;
Michael Chanf92905d2006-06-29 20:14:29 -07003690 rx_std_posted++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3692 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3693 mapping);
3694 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3695 post_ptr = &tp->rx_jumbo_ptr;
3696 }
3697 else {
3698 goto next_pkt_nopost;
3699 }
3700
3701 work_mask |= opaque_key;
3702
3703 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3704 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3705 drop_it:
3706 tg3_recycle_rx(tp, opaque_key,
3707 desc_idx, *post_ptr);
3708 drop_it_no_recycle:
3709 /* Other statistics kept track of by card. */
3710 tp->net_stats.rx_dropped++;
3711 goto next_pkt;
3712 }
3713
3714 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3715
Jeff Garzik6aa20a22006-09-13 13:24:59 -04003716 if (len > RX_COPY_THRESHOLD
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 && tp->rx_offset == 2
3718 /* rx_offset != 2 iff this is a 5701 card running
3719 * in PCI-X mode [see tg3_get_invariants()] */
3720 ) {
3721 int skb_size;
3722
3723 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3724 desc_idx, *post_ptr);
3725 if (skb_size < 0)
3726 goto drop_it;
3727
3728 pci_unmap_single(tp->pdev, dma_addr,
3729 skb_size - tp->rx_offset,
3730 PCI_DMA_FROMDEVICE);
3731
3732 skb_put(skb, len);
3733 } else {
3734 struct sk_buff *copy_skb;
3735
3736 tg3_recycle_rx(tp, opaque_key,
3737 desc_idx, *post_ptr);
3738
David S. Millera20e9c62006-07-31 22:38:16 -07003739 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740 if (copy_skb == NULL)
3741 goto drop_it_no_recycle;
3742
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 skb_reserve(copy_skb, 2);
3744 skb_put(copy_skb, len);
3745 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03003746 skb_copy_from_linear_data(skb, copy_skb->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3748
3749 /* We'll reuse the original ring buffer. */
3750 skb = copy_skb;
3751 }
3752
3753 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3754 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3755 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3756 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3757 skb->ip_summed = CHECKSUM_UNNECESSARY;
3758 else
3759 skb->ip_summed = CHECKSUM_NONE;
3760
3761 skb->protocol = eth_type_trans(skb, tp->dev);
3762#if TG3_VLAN_TAG_USED
3763 if (tp->vlgrp != NULL &&
3764 desc->type_flags & RXD_FLAG_VLAN) {
3765 tg3_vlan_rx(tp, skb,
3766 desc->err_vlan & RXD_VLAN_MASK);
3767 } else
3768#endif
3769 netif_receive_skb(skb);
3770
3771 tp->dev->last_rx = jiffies;
3772 received++;
3773 budget--;
3774
3775next_pkt:
3776 (*post_ptr)++;
Michael Chanf92905d2006-06-29 20:14:29 -07003777
3778 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3779 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3780
3781 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3782 TG3_64BIT_REG_LOW, idx);
3783 work_mask &= ~RXD_OPAQUE_RING_STD;
3784 rx_std_posted = 0;
3785 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786next_pkt_nopost:
Michael Chan483ba502005-04-25 15:14:03 -07003787 sw_idx++;
Eric Dumazet6b31a512007-02-06 13:29:21 -08003788 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
Michael Chan52f6d692005-04-25 15:14:32 -07003789
3790 /* Refresh hw_idx to see if there is new work */
3791 if (sw_idx == hw_idx) {
3792 hw_idx = tp->hw_status->idx[0].rx_producer;
3793 rmb();
3794 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 }
3796
3797 /* ACK the status ring. */
Michael Chan483ba502005-04-25 15:14:03 -07003798 tp->rx_rcb_ptr = sw_idx;
3799 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800
3801 /* Refill RX ring(s). */
3802 if (work_mask & RXD_OPAQUE_RING_STD) {
3803 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3804 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3805 sw_idx);
3806 }
3807 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3808 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3809 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3810 sw_idx);
3811 }
3812 mmiowb();
3813
3814 return received;
3815}
3816
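/* Core NAPI worker: handle link-change events (when not polled via the
 * link-change register or serdes polling), reap TX completions and
 * process up to (budget - work_done) RX packets.  Returns the updated
 * work_done count.
 */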
David S. Miller6f535762007-10-11 18:08:29 -07003817static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821 /* handle link change and other phy events */
3822 if (!(tp->tg3_flags &
3823 (TG3_FLAG_USE_LINKCHG_REG |
3824 TG3_FLAG_POLL_SERDES))) {
3825 if (sblk->status & SD_STATUS_LINK_CHG) {
3826 sblk->status = SD_STATUS_UPDATED |
3827 (sblk->status & ~SD_STATUS_LINK_CHG);
David S. Millerf47c11e2005-06-24 20:18:35 -07003828 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829 tg3_setup_phy(tp, 0);
David S. Millerf47c11e2005-06-24 20:18:35 -07003830 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 }
3832 }
3833
3834 /* run TX completion thread */
3835 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 tg3_tx(tp);
David S. Miller6f535762007-10-11 18:08:29 -07003837 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
Michael Chan4fd7ab52007-10-12 01:39:50 -07003838 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 }
3840
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841 /* run RX thread, within the bounds set by NAPI.
3842 * All RX "locking" is done by ensuring outside
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003843 * code synchronizes with tg3->napi.poll()
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003845 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
David S. Miller6f535762007-10-11 18:08:29 -07003846 work_done += tg3_rx(tp, budget - work_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847
David S. Miller6f535762007-10-11 18:08:29 -07003848 return work_done;
3849}
David S. Millerf7383c22005-05-18 22:50:53 -07003850
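/* NAPI poll method: loop over tg3_poll_work() until the budget is
 * exhausted or no work remains, then complete NAPI and re-enable chip
 * interrupts via tg3_restart_ints().  A pending TX recovery schedules
 * the reset task instead.
 */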
David S. Miller6f535762007-10-11 18:08:29 -07003851static int tg3_poll(struct napi_struct *napi, int budget)
3852{
3853 struct tg3 *tp = container_of(napi, struct tg3, napi);
3854 int work_done = 0;
Michael Chan4fd7ab52007-10-12 01:39:50 -07003855 struct tg3_hw_status *sblk = tp->hw_status;
David S. Miller6f535762007-10-11 18:08:29 -07003856
3857 while (1) {
3858 work_done = tg3_poll_work(tp, work_done, budget);
3859
3860 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3861 goto tx_recovery;
3862
3863 if (unlikely(work_done >= budget))
3864 break;
3865
Michael Chan4fd7ab52007-10-12 01:39:50 -07003866 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3867 /* tp->last_tag is used in tg3_restart_ints() below
3868 * to tell the hw how much work has been processed,
3869 * so we must read it before checking for more work.
3870 */
3871 tp->last_tag = sblk->status_tag;
3872 rmb();
3873 } else
3874 sblk->status &= ~SD_STATUS_UPDATED;
3875
David S. Miller6f535762007-10-11 18:08:29 -07003876 if (likely(!tg3_has_work(tp))) {
David S. Miller6f535762007-10-11 18:08:29 -07003877 netif_rx_complete(tp->dev, napi);
3878 tg3_restart_ints(tp);
3879 break;
3880 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 }
3882
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003883 return work_done;
David S. Miller6f535762007-10-11 18:08:29 -07003884
3885tx_recovery:
Michael Chan4fd7ab52007-10-12 01:39:50 -07003886 /* work_done is guaranteed to be less than budget. */
David S. Miller6f535762007-10-11 18:08:29 -07003887 netif_rx_complete(tp->dev, napi);
3888 schedule_work(&tp->reset_task);
Michael Chan4fd7ab52007-10-12 01:39:50 -07003889 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890}
3891
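/* Make the interrupt handler bail out early (it checks tp->irq_sync) and
 * wait for any instance that is already running to finish.
 */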
David S. Millerf47c11e2005-06-24 20:18:35 -07003892static void tg3_irq_quiesce(struct tg3 *tp)
3893{
3894 BUG_ON(tp->irq_sync);
3895
3896 tp->irq_sync = 1;
3897 smp_mb();
3898
3899 synchronize_irq(tp->pdev->irq);
3900}
3901
3902static inline int tg3_irq_sync(struct tg3 *tp)
3903{
3904 return tp->irq_sync;
3905}
3906
3907/* Fully shut down all tg3 driver activity elsewhere in the system.
3908 * If irq_sync is non-zero, the IRQ handler is quiesced as well.
3909 * Most of the time this is not necessary, except when
3910 * shutting down the device.
3911 */
3912static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3913{
Michael Chan46966542007-07-11 19:47:19 -07003914 spin_lock_bh(&tp->lock);
David S. Millerf47c11e2005-06-24 20:18:35 -07003915 if (irq_sync)
3916 tg3_irq_quiesce(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07003917}
3918
3919static inline void tg3_full_unlock(struct tg3 *tp)
3920{
David S. Millerf47c11e2005-06-24 20:18:35 -07003921 spin_unlock_bh(&tp->lock);
3922}
3923
Michael Chanfcfa0a32006-03-20 22:28:41 -08003924/* One-shot MSI handler - Chip automatically disables interrupt
3925 * after sending the MSI, so the driver doesn't have to do it.
3926 */
David Howells7d12e782006-10-05 14:55:46 +01003927static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
Michael Chanfcfa0a32006-03-20 22:28:41 -08003928{
3929 struct net_device *dev = dev_id;
3930 struct tg3 *tp = netdev_priv(dev);
3931
3932 prefetch(tp->hw_status);
3933 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3934
3935 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003936 netif_rx_schedule(dev, &tp->napi);
Michael Chanfcfa0a32006-03-20 22:28:41 -08003937
3938 return IRQ_HANDLED;
3939}
3940
Michael Chan88b06bc2005-04-21 17:13:25 -07003941/* MSI ISR - No need to check for interrupt sharing and no need to
3942 * flush status block and interrupt mailbox. PCI ordering rules
3943 * guarantee that MSI will arrive after the status block.
3944 */
David Howells7d12e782006-10-05 14:55:46 +01003945static irqreturn_t tg3_msi(int irq, void *dev_id)
Michael Chan88b06bc2005-04-21 17:13:25 -07003946{
3947 struct net_device *dev = dev_id;
3948 struct tg3 *tp = netdev_priv(dev);
Michael Chan88b06bc2005-04-21 17:13:25 -07003949
Michael Chan61487482005-09-05 17:53:19 -07003950 prefetch(tp->hw_status);
3951 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Michael Chan88b06bc2005-04-21 17:13:25 -07003952 /*
David S. Millerfac9b832005-05-18 22:46:34 -07003953 * Writing any value to intr-mbox-0 clears PCI INTA# and
Michael Chan88b06bc2005-04-21 17:13:25 -07003954 * chip-internal interrupt pending events.
David S. Millerfac9b832005-05-18 22:46:34 -07003955	 * Writing non-zero to intr-mbox-0 additionally tells the
Michael Chan88b06bc2005-04-21 17:13:25 -07003956 * NIC to stop sending us irqs, engaging "in-intr-handler"
3957 * event coalescing.
3958 */
3959 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chan61487482005-09-05 17:53:19 -07003960 if (likely(!tg3_irq_sync(tp)))
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003961 netif_rx_schedule(dev, &tp->napi);
Michael Chan61487482005-09-05 17:53:19 -07003962
Michael Chan88b06bc2005-04-21 17:13:25 -07003963 return IRQ_RETVAL(1);
3964}
3965
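/* Legacy INTx handler for chips that do not use tagged status blocks;
 * SD_STATUS_UPDATED plus the PCI state register decide whether the
 * interrupt is really ours.
 */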
David Howells7d12e782006-10-05 14:55:46 +01003966static irqreturn_t tg3_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967{
3968 struct net_device *dev = dev_id;
3969 struct tg3 *tp = netdev_priv(dev);
3970 struct tg3_hw_status *sblk = tp->hw_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 unsigned int handled = 1;
3972
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 /* In INTx mode, it is possible for the interrupt to arrive at
3974 * the CPU before the status block posted prior to the interrupt is visible.
3975 * Reading the PCI State register will confirm whether the
3976 * interrupt is ours and will flush the status block.
3977 */
Michael Chand18edcb2007-03-24 20:57:11 -07003978 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3979 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3980 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3981 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07003982 goto out;
David S. Millerfac9b832005-05-18 22:46:34 -07003983 }
Michael Chand18edcb2007-03-24 20:57:11 -07003984 }
3985
3986 /*
3987 * Writing any value to intr-mbox-0 clears PCI INTA# and
3988 * chip-internal interrupt pending events.
3989 * Writing non-zero to intr-mbox-0 additionally tells the
3990 * NIC to stop sending us irqs, engaging "in-intr-handler"
3991 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07003992 *
3993 * Flush the mailbox to de-assert the IRQ immediately to prevent
3994 * spurious interrupts. The flush impacts performance but
3995 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07003996 */
Michael Chanc04cb342007-05-07 00:26:15 -07003997 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07003998 if (tg3_irq_sync(tp))
3999 goto out;
4000 sblk->status &= ~SD_STATUS_UPDATED;
4001 if (likely(tg3_has_work(tp))) {
4002 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004003 netif_rx_schedule(dev, &tp->napi);
Michael Chand18edcb2007-03-24 20:57:11 -07004004 } else {
4005 /* No work, shared interrupt perhaps? re-enable
4006 * interrupts, and flush that PCI write
4007 */
4008 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4009 0x00000000);
David S. Millerfac9b832005-05-18 22:46:34 -07004010 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004011out:
David S. Millerfac9b832005-05-18 22:46:34 -07004012 return IRQ_RETVAL(handled);
4013}
4014
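/* Interrupt handler for chips using tagged status blocks; the status tag,
 * rather than SD_STATUS_UPDATED, decides whether there is new work.
 */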
David Howells7d12e782006-10-05 14:55:46 +01004015static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
David S. Millerfac9b832005-05-18 22:46:34 -07004016{
4017 struct net_device *dev = dev_id;
4018 struct tg3 *tp = netdev_priv(dev);
4019 struct tg3_hw_status *sblk = tp->hw_status;
David S. Millerfac9b832005-05-18 22:46:34 -07004020 unsigned int handled = 1;
4021
David S. Millerfac9b832005-05-18 22:46:34 -07004022 /* In INTx mode, it is possible for the interrupt to arrive at
4023 * the CPU before the status block posted prior to the interrupt.
4024 * the CPU before the status block posted prior to the interrupt is visible.
4025 * interrupt is ours and will flush the status block.
4026 */
Michael Chand18edcb2007-03-24 20:57:11 -07004027 if (unlikely(sblk->status_tag == tp->last_tag)) {
4028 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4029 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4030 handled = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07004031 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032 }
Michael Chand18edcb2007-03-24 20:57:11 -07004033 }
4034
4035 /*
4036 * writing any value to intr-mbox-0 clears PCI INTA# and
4037 * chip-internal interrupt pending events.
4038 * writing non-zero to intr-mbox-0 additionally tells the
4039 * NIC to stop sending us irqs, engaging "in-intr-handler"
4040 * event coalescing.
Michael Chanc04cb342007-05-07 00:26:15 -07004041 *
4042 * Flush the mailbox to de-assert the IRQ immediately to prevent
4043 * spurious interrupts. The flush impacts performance but
4044 * excessive spurious interrupts can be worse in some cases.
Michael Chand18edcb2007-03-24 20:57:11 -07004045 */
Michael Chanc04cb342007-05-07 00:26:15 -07004046 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
Michael Chand18edcb2007-03-24 20:57:11 -07004047 if (tg3_irq_sync(tp))
4048 goto out;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004049 if (netif_rx_schedule_prep(dev, &tp->napi)) {
Michael Chand18edcb2007-03-24 20:57:11 -07004050 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4051 /* Update last_tag to mark that this status has been
4052 * seen.  Because the interrupt may be shared, we may be
4053 * racing with tg3_poll(), so only update last_tag
4054 * if tg3_poll() is not scheduled.
4055 */
4056 tp->last_tag = sblk->status_tag;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004057 __netif_rx_schedule(dev, &tp->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058 }
David S. Millerf47c11e2005-06-24 20:18:35 -07004059out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 return IRQ_RETVAL(handled);
4061}
4062
Michael Chan79381092005-04-21 17:13:59 -07004063/* ISR for interrupt test */
David Howells7d12e782006-10-05 14:55:46 +01004064static irqreturn_t tg3_test_isr(int irq, void *dev_id)
Michael Chan79381092005-04-21 17:13:59 -07004065{
4066 struct net_device *dev = dev_id;
4067 struct tg3 *tp = netdev_priv(dev);
4068 struct tg3_hw_status *sblk = tp->hw_status;
4069
Michael Chanf9804dd2005-09-27 12:13:10 -07004070 if ((sblk->status & SD_STATUS_UPDATED) ||
4071 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
Michael Chanb16250e2006-09-27 16:10:14 -07004072 tg3_disable_ints(tp);
Michael Chan79381092005-04-21 17:13:59 -07004073 return IRQ_RETVAL(1);
4074 }
4075 return IRQ_RETVAL(0);
4076}
4077
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07004078static int tg3_init_hw(struct tg3 *, int);
Michael Chan944d9802005-05-29 14:57:48 -07004079static int tg3_halt(struct tg3 *, int, int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080
Michael Chanb9ec6c12006-07-25 16:37:27 -07004081/* Restart hardware after configuration changes, self-test, etc.
4082 * Invoked with tp->lock held.
4083 */
4084static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
Eric Dumazet78c61462008-04-24 23:33:06 -07004085 __releases(tp->lock)
4086 __acquires(tp->lock)
Michael Chanb9ec6c12006-07-25 16:37:27 -07004087{
4088 int err;
4089
4090 err = tg3_init_hw(tp, reset_phy);
4091 if (err) {
4092 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4093 "aborting.\n", tp->dev->name);
4094 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4095 tg3_full_unlock(tp);
4096 del_timer_sync(&tp->timer);
4097 tp->irq_sync = 0;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004098 napi_enable(&tp->napi);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004099 dev_close(tp->dev);
4100 tg3_full_lock(tp, 0);
4101 }
4102 return err;
4103}
4104
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105#ifdef CONFIG_NET_POLL_CONTROLLER
4106static void tg3_poll_controller(struct net_device *dev)
4107{
Michael Chan88b06bc2005-04-21 17:13:25 -07004108 struct tg3 *tp = netdev_priv(dev);
4109
David Howells7d12e782006-10-05 14:55:46 +01004110 tg3_interrupt(tp->pdev->irq, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111}
4112#endif
4113
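/* Workqueue handler that halts and re-initializes the chip in process
 * context; scheduled e.g. from tg3_tx_timeout() and from the TX-recovery
 * path in tg3_poll().
 */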
David Howellsc4028952006-11-22 14:57:56 +00004114static void tg3_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115{
David Howellsc4028952006-11-22 14:57:56 +00004116 struct tg3 *tp = container_of(work, struct tg3, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117 unsigned int restart_timer;
4118
Michael Chan7faa0062006-02-02 17:29:28 -08004119 tg3_full_lock(tp, 0);
Michael Chan7faa0062006-02-02 17:29:28 -08004120
4121 if (!netif_running(tp->dev)) {
Michael Chan7faa0062006-02-02 17:29:28 -08004122 tg3_full_unlock(tp);
4123 return;
4124 }
4125
4126 tg3_full_unlock(tp);
4127
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 tg3_netif_stop(tp);
4129
David S. Millerf47c11e2005-06-24 20:18:35 -07004130 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131
4132 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4133 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4134
Michael Chandf3e6542006-05-26 17:48:07 -07004135 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4136 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4137 tp->write32_rx_mbox = tg3_write_flush_reg32;
4138 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4139 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4140 }
4141
Michael Chan944d9802005-05-29 14:57:48 -07004142 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004143 if (tg3_init_hw(tp, 1))
4144 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145
4146 tg3_netif_start(tp);
4147
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148 if (restart_timer)
4149 mod_timer(&tp->timer, jiffies + 1);
Michael Chan7faa0062006-02-02 17:29:28 -08004150
Michael Chanb9ec6c12006-07-25 16:37:27 -07004151out:
Michael Chan7faa0062006-02-02 17:29:28 -08004152 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153}
4154
Michael Chanb0408752007-02-13 12:18:30 -08004155static void tg3_dump_short_state(struct tg3 *tp)
4156{
4157 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4158 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4159 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4160 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4161}
4162
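/* dev->tx_timeout() hook: optionally dump a short register state and then
 * schedule the reset task to recover the device.
 */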
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163static void tg3_tx_timeout(struct net_device *dev)
4164{
4165 struct tg3 *tp = netdev_priv(dev);
4166
Michael Chanb0408752007-02-13 12:18:30 -08004167 if (netif_msg_tx_err(tp)) {
Michael Chan9f88f292006-12-07 00:22:54 -08004168 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4169 dev->name);
Michael Chanb0408752007-02-13 12:18:30 -08004170 tg3_dump_short_state(tp);
4171 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172
4173 schedule_work(&tp->reset_task);
4174}
4175
Michael Chanc58ec932005-09-17 00:46:27 -07004176/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4177static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4178{
4179 u32 base = (u32) mapping & 0xffffffff;
4180
4181 return ((base > 0xffffdcc0) &&
4182 (base + len + 8 < base));
4183}
4184
Michael Chan72f2afb2006-03-06 19:28:35 -08004185/* Test for DMA addresses > 40-bit */
4186static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4187 int len)
4188{
4189#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
Michael Chan6728a8e2006-03-27 23:16:49 -08004190 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
Michael Chan72f2afb2006-03-06 19:28:35 -08004191 return (((u64) mapping + len) > DMA_40BIT_MASK);
4192 return 0;
4193#else
4194 return 0;
4195#endif
4196}
4197
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4199
Michael Chan72f2afb2006-03-06 19:28:35 -08004200/* Work around 4GB and 40-bit hardware DMA bugs. */
4201static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
Michael Chanc58ec932005-09-17 00:46:27 -07004202 u32 last_plus_one, u32 *start,
4203 u32 base_flags, u32 mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204{
Matt Carlson41588ba2008-04-19 18:12:33 -07004205 struct sk_buff *new_skb;
Michael Chanc58ec932005-09-17 00:46:27 -07004206 dma_addr_t new_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207 u32 entry = *start;
Michael Chanc58ec932005-09-17 00:46:27 -07004208 int i, ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209
Matt Carlson41588ba2008-04-19 18:12:33 -07004210 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4211 new_skb = skb_copy(skb, GFP_ATOMIC);
4212 else {
4213 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4214
4215 new_skb = skb_copy_expand(skb,
4216 skb_headroom(skb) + more_headroom,
4217 skb_tailroom(skb), GFP_ATOMIC);
4218 }
4219
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 if (!new_skb) {
Michael Chanc58ec932005-09-17 00:46:27 -07004221 ret = -1;
4222 } else {
4223 /* New SKB is guaranteed to be linear. */
4224 entry = *start;
4225 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4226 PCI_DMA_TODEVICE);
4227 /* Make sure new skb does not cross any 4G boundaries.
4228 * Drop the packet if it does.
4229 */
4230 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4231 ret = -1;
4232 dev_kfree_skb(new_skb);
4233 new_skb = NULL;
4234 } else {
4235 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4236 base_flags, 1 | (mss << 1));
4237 *start = NEXT_TX(entry);
4238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239 }
4240
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241 /* Now clean up the sw ring entries. */
4242 i = 0;
4243 while (entry != last_plus_one) {
4244 int len;
4245
4246 if (i == 0)
4247 len = skb_headlen(skb);
4248 else
4249 len = skb_shinfo(skb)->frags[i-1].size;
4250 pci_unmap_single(tp->pdev,
4251 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4252 len, PCI_DMA_TODEVICE);
4253 if (i == 0) {
4254 tp->tx_buffers[entry].skb = new_skb;
4255 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4256 } else {
4257 tp->tx_buffers[entry].skb = NULL;
4258 }
4259 entry = NEXT_TX(entry);
4260 i++;
4261 }
4262
4263 dev_kfree_skb(skb);
4264
Michael Chanc58ec932005-09-17 00:46:27 -07004265 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266}
4267
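/* Fill one TX descriptor.  The low bit of mss_and_is_end marks the last
 * descriptor of the packet (TXD_FLAG_END); the remaining bits carry the
 * TSO MSS.
 */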
4268static void tg3_set_txd(struct tg3 *tp, int entry,
4269 dma_addr_t mapping, int len, u32 flags,
4270 u32 mss_and_is_end)
4271{
4272 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4273 int is_end = (mss_and_is_end & 0x1);
4274 u32 mss = (mss_and_is_end >> 1);
4275 u32 vlan_tag = 0;
4276
4277 if (is_end)
4278 flags |= TXD_FLAG_END;
4279 if (flags & TXD_FLAG_VLAN) {
4280 vlan_tag = flags >> 16;
4281 flags &= 0xffff;
4282 }
4283 vlan_tag |= (mss << TXD_MSS_SHIFT);
4284
4285 txd->addr_hi = ((u64) mapping >> 32);
4286 txd->addr_lo = ((u64) mapping & 0xffffffff);
4287 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4288 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4289}
4290
Michael Chan5a6f3072006-03-20 22:28:05 -08004291/* hard_start_xmit for devices that don't have any bugs and
4292 * support TG3_FLG2_HW_TSO_2 only.
4293 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4295{
4296 struct tg3 *tp = netdev_priv(dev);
4297 dma_addr_t mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 u32 len, entry, base_flags, mss;
Michael Chan5a6f3072006-03-20 22:28:05 -08004299
4300 len = skb_headlen(skb);
4301
Michael Chan00b70502006-06-17 21:58:45 -07004302 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004303 * and TX reclaim runs via tp->napi.poll inside of a software
Michael Chan5a6f3072006-03-20 22:28:05 -08004304 * interrupt. Furthermore, IRQ processing runs lockless so we have
4305 * no IRQ context deadlocks to worry about either. Rejoice!
4306 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004307 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004308 if (!netif_queue_stopped(dev)) {
4309 netif_stop_queue(dev);
4310
4311 /* This is a hard error, log it. */
4312 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4313 "queue awake!\n", dev->name);
4314 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004315 return NETDEV_TX_BUSY;
4316 }
4317
4318 entry = tp->tx_prod;
4319 base_flags = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004320 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004321 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004322 int tcp_opt_len, ip_tcp_len;
4323
4324 if (skb_header_cloned(skb) &&
4325 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4326 dev_kfree_skb(skb);
4327 goto out_unlock;
4328 }
4329
Michael Chanb0026622006-07-03 19:42:14 -07004330 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4331 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4332 else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004333 struct iphdr *iph = ip_hdr(skb);
4334
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004335 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004336 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Michael Chanb0026622006-07-03 19:42:14 -07004337
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004338 iph->check = 0;
4339 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
Michael Chanb0026622006-07-03 19:42:14 -07004340 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4341 }
Michael Chan5a6f3072006-03-20 22:28:05 -08004342
4343 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4344 TXD_FLAG_CPU_POST_DMA);
4345
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004346 tcp_hdr(skb)->check = 0;
Michael Chan5a6f3072006-03-20 22:28:05 -08004347
Michael Chan5a6f3072006-03-20 22:28:05 -08004348 }
Patrick McHardy84fa7932006-08-29 16:44:56 -07004349 else if (skb->ip_summed == CHECKSUM_PARTIAL)
Michael Chan5a6f3072006-03-20 22:28:05 -08004350 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Michael Chan5a6f3072006-03-20 22:28:05 -08004351#if TG3_VLAN_TAG_USED
4352 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4353 base_flags |= (TXD_FLAG_VLAN |
4354 (vlan_tx_tag_get(skb) << 16));
4355#endif
4356
4357 /* Queue skb data, a.k.a. the main skb fragment. */
4358 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4359
4360 tp->tx_buffers[entry].skb = skb;
4361 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4362
4363 tg3_set_txd(tp, entry, mapping, len, base_flags,
4364 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4365
4366 entry = NEXT_TX(entry);
4367
4368 /* Now loop through additional data fragments, and queue them. */
4369 if (skb_shinfo(skb)->nr_frags > 0) {
4370 unsigned int i, last;
4371
4372 last = skb_shinfo(skb)->nr_frags - 1;
4373 for (i = 0; i <= last; i++) {
4374 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4375
4376 len = frag->size;
4377 mapping = pci_map_page(tp->pdev,
4378 frag->page,
4379 frag->page_offset,
4380 len, PCI_DMA_TODEVICE);
4381
4382 tp->tx_buffers[entry].skb = NULL;
4383 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4384
4385 tg3_set_txd(tp, entry, mapping, len,
4386 base_flags, (i == last) | (mss << 1));
4387
4388 entry = NEXT_TX(entry);
4389 }
4390 }
4391
4392 /* Packets are ready, update Tx producer idx local and on card. */
4393 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4394
4395 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004396 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Michael Chan5a6f3072006-03-20 22:28:05 -08004397 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004398 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan5a6f3072006-03-20 22:28:05 -08004399 netif_wake_queue(tp->dev);
4400 }
4401
4402out_unlock:
4403 mmiowb();
Michael Chan5a6f3072006-03-20 22:28:05 -08004404
4405 dev->trans_start = jiffies;
4406
4407 return NETDEV_TX_OK;
4408}
4409
Michael Chan52c0fd82006-06-29 20:15:54 -07004410static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4411
4412/* Use GSO to work around a rare TSO bug that may be triggered when the
4413 * TSO header is greater than 80 bytes.
4414 */
4415static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4416{
4417 struct sk_buff *segs, *nskb;
4418
4419 /* Estimate the number of fragments in the worst case */
Michael Chan1b2a7202006-08-07 21:46:02 -07004420 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
Michael Chan52c0fd82006-06-29 20:15:54 -07004421 netif_stop_queue(tp->dev);
Michael Chan7f62ad52007-02-20 23:25:40 -08004422 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4423 return NETDEV_TX_BUSY;
4424
4425 netif_wake_queue(tp->dev);
Michael Chan52c0fd82006-06-29 20:15:54 -07004426 }
4427
4428 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07004429 if (IS_ERR(segs))
Michael Chan52c0fd82006-06-29 20:15:54 -07004430 goto tg3_tso_bug_end;
4431
4432 do {
4433 nskb = segs;
4434 segs = segs->next;
4435 nskb->next = NULL;
4436 tg3_start_xmit_dma_bug(nskb, tp->dev);
4437 } while (segs);
4438
4439tg3_tso_bug_end:
4440 dev_kfree_skb(skb);
4441
4442 return NETDEV_TX_OK;
4443}
Michael Chan52c0fd82006-06-29 20:15:54 -07004444
Michael Chan5a6f3072006-03-20 22:28:05 -08004445/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4446 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4447 */
4448static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4449{
4450 struct tg3 *tp = netdev_priv(dev);
4451 dma_addr_t mapping;
4452 u32 len, entry, base_flags, mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453 int would_hit_hwbug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454
4455 len = skb_headlen(skb);
4456
Michael Chan00b70502006-06-17 21:58:45 -07004457 /* We are running in BH disabled context with netif_tx_lock
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004458 * and TX reclaim runs via tp->napi.poll inside of a software
David S. Millerf47c11e2005-06-24 20:18:35 -07004459 * interrupt. Furthermore, IRQ processing runs lockless so we have
4460 * no IRQ context deadlocks to worry about either. Rejoice!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 */
Michael Chan1b2a7202006-08-07 21:46:02 -07004462 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
Stephen Hemminger1f064a82005-12-06 17:36:44 -08004463 if (!netif_queue_stopped(dev)) {
4464 netif_stop_queue(dev);
4465
4466 /* This is a hard error, log it. */
4467 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4468 "queue awake!\n", dev->name);
4469 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470 return NETDEV_TX_BUSY;
4471 }
4472
4473 entry = tp->tx_prod;
4474 base_flags = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07004475 if (skb->ip_summed == CHECKSUM_PARTIAL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 base_flags |= TXD_FLAG_TCPUDP_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 mss = 0;
Matt Carlsonc13e3712007-05-05 11:50:04 -07004478 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004479 struct iphdr *iph;
Michael Chan52c0fd82006-06-29 20:15:54 -07004480 int tcp_opt_len, ip_tcp_len, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481
4482 if (skb_header_cloned(skb) &&
4483 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4484 dev_kfree_skb(skb);
4485 goto out_unlock;
4486 }
4487
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07004488 tcp_opt_len = tcp_optlen(skb);
Arnaldo Carvalho de Meloc9bdd4b2007-03-12 20:09:15 -03004489 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490
Michael Chan52c0fd82006-06-29 20:15:54 -07004491 hdr_len = ip_tcp_len + tcp_opt_len;
4492 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
Michael Chan7f62ad52007-02-20 23:25:40 -08004493 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
Michael Chan52c0fd82006-06-29 20:15:54 -07004494 return (tg3_tso_bug(tp, skb));
4495
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4497 TXD_FLAG_CPU_POST_DMA);
4498
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004499 iph = ip_hdr(skb);
4500 iph->check = 0;
4501 iph->tot_len = htons(mss + hdr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004503 tcp_hdr(skb)->check = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07004505 } else
4506 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4507 iph->daddr, 0,
4508 IPPROTO_TCP,
4509 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510
4511 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4512 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004513 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514 int tsflags;
4515
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004516 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517 mss |= (tsflags << 11);
4518 }
4519 } else {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004520 if (tcp_opt_len || iph->ihl > 5) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521 int tsflags;
4522
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07004523 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524 base_flags |= tsflags << 12;
4525 }
4526 }
4527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528#if TG3_VLAN_TAG_USED
4529 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4530 base_flags |= (TXD_FLAG_VLAN |
4531 (vlan_tx_tag_get(skb) << 16));
4532#endif
4533
4534 /* Queue skb data, a.k.a. the main skb fragment. */
4535 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4536
4537 tp->tx_buffers[entry].skb = skb;
4538 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4539
4540 would_hit_hwbug = 0;
4541
Matt Carlson41588ba2008-04-19 18:12:33 -07004542 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4543 would_hit_hwbug = 1;
4544 else if (tg3_4g_overflow_test(mapping, len))
Michael Chanc58ec932005-09-17 00:46:27 -07004545 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546
4547 tg3_set_txd(tp, entry, mapping, len, base_flags,
4548 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4549
4550 entry = NEXT_TX(entry);
4551
4552 /* Now loop through additional data fragments, and queue them. */
4553 if (skb_shinfo(skb)->nr_frags > 0) {
4554 unsigned int i, last;
4555
4556 last = skb_shinfo(skb)->nr_frags - 1;
4557 for (i = 0; i <= last; i++) {
4558 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4559
4560 len = frag->size;
4561 mapping = pci_map_page(tp->pdev,
4562 frag->page,
4563 frag->page_offset,
4564 len, PCI_DMA_TODEVICE);
4565
4566 tp->tx_buffers[entry].skb = NULL;
4567 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4568
Michael Chanc58ec932005-09-17 00:46:27 -07004569 if (tg3_4g_overflow_test(mapping, len))
4570 would_hit_hwbug = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571
Michael Chan72f2afb2006-03-06 19:28:35 -08004572 if (tg3_40bit_overflow_test(tp, mapping, len))
4573 would_hit_hwbug = 1;
4574
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4576 tg3_set_txd(tp, entry, mapping, len,
4577 base_flags, (i == last)|(mss << 1));
4578 else
4579 tg3_set_txd(tp, entry, mapping, len,
4580 base_flags, (i == last));
4581
4582 entry = NEXT_TX(entry);
4583 }
4584 }
4585
4586 if (would_hit_hwbug) {
4587 u32 last_plus_one = entry;
4588 u32 start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589
Michael Chanc58ec932005-09-17 00:46:27 -07004590 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4591 start &= (TG3_TX_RING_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592
4593 /* If the workaround fails due to memory/mapping
4594 * failure, silently drop this packet.
4595 */
Michael Chan72f2afb2006-03-06 19:28:35 -08004596 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
Michael Chanc58ec932005-09-17 00:46:27 -07004597 &start, base_flags, mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598 goto out_unlock;
4599
4600 entry = start;
4601 }
4602
4603 /* Packets are ready, update Tx producer idx local and on card. */
4604 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4605
4606 tp->tx_prod = entry;
Michael Chan1b2a7202006-08-07 21:46:02 -07004607 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608 netif_stop_queue(dev);
Ranjit Manomohan42952232006-10-18 20:54:26 -07004609 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
Michael Chan51b91462005-09-01 17:41:28 -07004610 netif_wake_queue(tp->dev);
4611 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612
4613out_unlock:
4614 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
4616 dev->trans_start = jiffies;
4617
4618 return NETDEV_TX_OK;
4619}
4620
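/* Apply a new MTU: jumbo MTUs enable the jumbo RX ring (5780-class chips
 * instead give up TSO capability); standard MTUs revert those changes.
 */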
4621static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4622 int new_mtu)
4623{
4624 dev->mtu = new_mtu;
4625
Michael Chanef7f5ec2005-07-25 12:32:25 -07004626 if (new_mtu > ETH_DATA_LEN) {
Michael Chana4e2b342005-10-26 15:46:52 -07004627 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanef7f5ec2005-07-25 12:32:25 -07004628 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4629 ethtool_op_set_tso(dev, 0);
4630 }
4631 else
4632 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4633 } else {
Michael Chana4e2b342005-10-26 15:46:52 -07004634 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chanef7f5ec2005-07-25 12:32:25 -07004635 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
Michael Chan0f893dc2005-07-25 12:30:38 -07004636 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
Michael Chanef7f5ec2005-07-25 12:32:25 -07004637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638}
4639
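/* Changing the MTU on a running interface requires a full halt and
 * re-init: the receive buffer size chosen in tg3_init_rings() depends on
 * the MTU (5780-class chips reuse the standard ring with jumbo-sized
 * buffers instead of enabling the separate jumbo ring), so the device is
 * halted, the new MTU recorded, and the hardware restarted under
 * tg3_full_lock() below.
 */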
4640static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4641{
4642 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07004643 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644
4645 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4646 return -EINVAL;
4647
4648 if (!netif_running(dev)) {
4649		/* We'll just catch it later when the
4650		 * device is brought up.
4651		 */
4652 tg3_set_mtu(dev, tp, new_mtu);
4653 return 0;
4654 }
4655
4656 tg3_netif_stop(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -07004657
4658 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659
Michael Chan944d9802005-05-29 14:57:48 -07004660 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661
4662 tg3_set_mtu(dev, tp, new_mtu);
4663
Michael Chanb9ec6c12006-07-25 16:37:27 -07004664 err = tg3_restart_hw(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665
Michael Chanb9ec6c12006-07-25 16:37:27 -07004666 if (!err)
4667 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668
David S. Millerf47c11e2005-06-24 20:18:35 -07004669 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670
Michael Chanb9ec6c12006-07-25 16:37:27 -07004671 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672}
4673
4674/* Free up pending packets in all rx/tx rings.
4675 *
4676 * The chip has been shut down and the driver detached from
4677 * the networking stack, so no interrupts or new tx packets
4678 * will end up in the driver. tp->{tx,}lock is not held and we
4679 * are not in interrupt context, so we may sleep.
4680 */
4681static void tg3_free_rings(struct tg3 *tp)
4682{
4683 struct ring_info *rxp;
4684 int i;
4685
4686 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4687 rxp = &tp->rx_std_buffers[i];
4688
4689 if (rxp->skb == NULL)
4690 continue;
4691 pci_unmap_single(tp->pdev,
4692 pci_unmap_addr(rxp, mapping),
Michael Chan7e72aad2005-07-25 12:31:17 -07004693 tp->rx_pkt_buf_sz - tp->rx_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 PCI_DMA_FROMDEVICE);
4695 dev_kfree_skb_any(rxp->skb);
4696 rxp->skb = NULL;
4697 }
4698
4699 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4700 rxp = &tp->rx_jumbo_buffers[i];
4701
4702 if (rxp->skb == NULL)
4703 continue;
4704 pci_unmap_single(tp->pdev,
4705 pci_unmap_addr(rxp, mapping),
4706 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4707 PCI_DMA_FROMDEVICE);
4708 dev_kfree_skb_any(rxp->skb);
4709 rxp->skb = NULL;
4710 }
4711
4712 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4713 struct tx_ring_info *txp;
4714 struct sk_buff *skb;
4715 int j;
4716
4717 txp = &tp->tx_buffers[i];
4718 skb = txp->skb;
4719
4720 if (skb == NULL) {
4721 i++;
4722 continue;
4723 }
4724
4725 pci_unmap_single(tp->pdev,
4726 pci_unmap_addr(txp, mapping),
4727 skb_headlen(skb),
4728 PCI_DMA_TODEVICE);
4729 txp->skb = NULL;
4730
4731 i++;
4732
4733 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4734 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4735 pci_unmap_page(tp->pdev,
4736 pci_unmap_addr(txp, mapping),
4737 skb_shinfo(skb)->frags[j].size,
4738 PCI_DMA_TODEVICE);
4739 i++;
4740 }
4741
4742 dev_kfree_skb_any(skb);
4743 }
4744}
4745
4746/* Initialize tx/rx rings for packet processing.
4747 *
4748 * The chip has been shut down and the driver detached from
4749 * the networking stack, so no interrupts or new tx packets
4750 * will end up in the driver. tp->{tx,}lock are held and thus
4751 * we may not sleep.
4752 */
Michael Chan32d8c572006-07-25 16:38:29 -07004753static int tg3_init_rings(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754{
4755 u32 i;
4756
4757 /* Free up all the SKBs. */
4758 tg3_free_rings(tp);
4759
4760 /* Zero out all descriptors. */
4761 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4762 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4763 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4764 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4765
Michael Chan7e72aad2005-07-25 12:31:17 -07004766 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
Michael Chana4e2b342005-10-26 15:46:52 -07004767 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
Michael Chan7e72aad2005-07-25 12:31:17 -07004768 (tp->dev->mtu > ETH_DATA_LEN))
4769 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4770
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771	/* Initialize invariants of the rings; we only set this
4772	 * stuff once. This works because the card does not
4773	 * write into the rx buffer posting rings.
4774	 */
4775 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4776 struct tg3_rx_buffer_desc *rxd;
4777
4778 rxd = &tp->rx_std[i];
Michael Chan7e72aad2005-07-25 12:31:17 -07004779 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004780 << RXD_LEN_SHIFT;
4781 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4782 rxd->opaque = (RXD_OPAQUE_RING_STD |
4783 (i << RXD_OPAQUE_INDEX_SHIFT));
4784 }
4785
Michael Chan0f893dc2005-07-25 12:30:38 -07004786 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004787 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4788 struct tg3_rx_buffer_desc *rxd;
4789
4790 rxd = &tp->rx_jumbo[i];
4791 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4792 << RXD_LEN_SHIFT;
4793 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4794 RXD_FLAG_JUMBO;
4795 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4796 (i << RXD_OPAQUE_INDEX_SHIFT));
4797 }
4798 }
4799
4800 /* Now allocate fresh SKBs for each rx ring. */
4801 for (i = 0; i < tp->rx_pending; i++) {
Michael Chan32d8c572006-07-25 16:38:29 -07004802 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4803 printk(KERN_WARNING PFX
4804 "%s: Using a smaller RX standard ring, "
4805 "only %d out of %d buffers were allocated "
4806 "successfully.\n",
4807 tp->dev->name, i, tp->rx_pending);
4808 if (i == 0)
4809 return -ENOMEM;
4810 tp->rx_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811 break;
Michael Chan32d8c572006-07-25 16:38:29 -07004812 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813 }
4814
Michael Chan0f893dc2005-07-25 12:30:38 -07004815 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4817 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
Michael Chan32d8c572006-07-25 16:38:29 -07004818 -1, i) < 0) {
4819 printk(KERN_WARNING PFX
4820 "%s: Using a smaller RX jumbo ring, "
4821 "only %d out of %d buffers were "
4822 "allocated successfully.\n",
4823 tp->dev->name, i, tp->rx_jumbo_pending);
4824 if (i == 0) {
4825 tg3_free_rings(tp);
4826 return -ENOMEM;
4827 }
4828 tp->rx_jumbo_pending = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829 break;
Michael Chan32d8c572006-07-25 16:38:29 -07004830 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 }
4832 }
Michael Chan32d8c572006-07-25 16:38:29 -07004833 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834}
4835
4836/*
4837 * Must not be invoked with interrupt sources disabled and
4838 * the hardware shut down.
4839 */
4840static void tg3_free_consistent(struct tg3 *tp)
4841{
Jesper Juhlb4558ea2005-10-28 16:53:13 -04004842 kfree(tp->rx_std_buffers);
4843 tp->rx_std_buffers = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004844 if (tp->rx_std) {
4845 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4846 tp->rx_std, tp->rx_std_mapping);
4847 tp->rx_std = NULL;
4848 }
4849 if (tp->rx_jumbo) {
4850 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4851 tp->rx_jumbo, tp->rx_jumbo_mapping);
4852 tp->rx_jumbo = NULL;
4853 }
4854 if (tp->rx_rcb) {
4855 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4856 tp->rx_rcb, tp->rx_rcb_mapping);
4857 tp->rx_rcb = NULL;
4858 }
4859 if (tp->tx_ring) {
4860 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4861 tp->tx_ring, tp->tx_desc_mapping);
4862 tp->tx_ring = NULL;
4863 }
4864 if (tp->hw_status) {
4865 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4866 tp->hw_status, tp->status_mapping);
4867 tp->hw_status = NULL;
4868 }
4869 if (tp->hw_stats) {
4870 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4871 tp->hw_stats, tp->stats_mapping);
4872 tp->hw_stats = NULL;
4873 }
4874}
4875
4876/*
4877 * Must not be invoked with interrupt sources disabled and
4878 * the hardware shut down. Can sleep.
4879 */
4880static int tg3_alloc_consistent(struct tg3 *tp)
4881{
Yan Burmanbd2b3342006-12-14 15:25:00 -08004882 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883 (TG3_RX_RING_SIZE +
4884 TG3_RX_JUMBO_RING_SIZE)) +
4885 (sizeof(struct tx_ring_info) *
4886 TG3_TX_RING_SIZE),
4887 GFP_KERNEL);
4888 if (!tp->rx_std_buffers)
4889 return -ENOMEM;
4890
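	/* The standard rx, jumbo rx and tx bookkeeping arrays all live in
	 * the single kzalloc'd block above; carve the jumbo and tx regions
	 * out of it here instead of allocating them separately.
	 */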
Linus Torvalds1da177e2005-04-16 15:20:36 -07004891 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4892 tp->tx_buffers = (struct tx_ring_info *)
4893 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4894
4895 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4896 &tp->rx_std_mapping);
4897 if (!tp->rx_std)
4898 goto err_out;
4899
4900 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4901 &tp->rx_jumbo_mapping);
4902
4903 if (!tp->rx_jumbo)
4904 goto err_out;
4905
4906 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4907 &tp->rx_rcb_mapping);
4908 if (!tp->rx_rcb)
4909 goto err_out;
4910
4911 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4912 &tp->tx_desc_mapping);
4913 if (!tp->tx_ring)
4914 goto err_out;
4915
4916 tp->hw_status = pci_alloc_consistent(tp->pdev,
4917 TG3_HW_STATUS_SIZE,
4918 &tp->status_mapping);
4919 if (!tp->hw_status)
4920 goto err_out;
4921
4922 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4923 sizeof(struct tg3_hw_stats),
4924 &tp->stats_mapping);
4925 if (!tp->hw_stats)
4926 goto err_out;
4927
4928 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4929 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4930
4931 return 0;
4932
4933err_out:
4934 tg3_free_consistent(tp);
4935 return -ENOMEM;
4936}
4937
4938#define MAX_WAIT_CNT 1000
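/* With 100 usec between polls, MAX_WAIT_CNT gives each block (and the
 * MAC TX drain loop in tg3_abort_hw()) roughly 100 ms to quiesce before
 * we give up on it.
 */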
4939
4940/* To stop a block, clear the enable bit and poll till it
4941 * clears. tp->lock is held.
4942 */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004943static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944{
4945 unsigned int i;
4946 u32 val;
4947
4948 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4949 switch (ofs) {
4950 case RCVLSC_MODE:
4951 case DMAC_MODE:
4952 case MBFREE_MODE:
4953 case BUFMGR_MODE:
4954 case MEMARB_MODE:
4955			/* We can't enable/disable these bits on the
4956			 * 5705/5750, so just report success.
4957			 */
4958 return 0;
4959
4960 default:
4961 break;
4962 };
4963 }
4964
4965 val = tr32(ofs);
4966 val &= ~enable_bit;
4967 tw32_f(ofs, val);
4968
4969 for (i = 0; i < MAX_WAIT_CNT; i++) {
4970 udelay(100);
4971 val = tr32(ofs);
4972 if ((val & enable_bit) == 0)
4973 break;
4974 }
4975
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004976 if (i == MAX_WAIT_CNT && !silent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4978 "ofs=%lx enable_bit=%x\n",
4979 ofs, enable_bit);
4980 return -ENODEV;
4981 }
4982
4983 return 0;
4984}
4985
4986/* tp->lock is held. */
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004987static int tg3_abort_hw(struct tg3 *tp, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004988{
4989 int i, err;
4990
4991 tg3_disable_ints(tp);
4992
4993 tp->rx_mode &= ~RX_MODE_ENABLE;
4994 tw32_f(MAC_RX_MODE, tp->rx_mode);
4995 udelay(10);
4996
David S. Millerb3b7d6b2005-05-05 14:40:20 -07004997 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4998 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4999 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5000 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5001 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5002 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005004 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5005 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5006 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5007 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5008 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5009 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5010 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005011
5012 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5013 tw32_f(MAC_MODE, tp->mac_mode);
5014 udelay(40);
5015
5016 tp->tx_mode &= ~TX_MODE_ENABLE;
5017 tw32_f(MAC_TX_MODE, tp->tx_mode);
5018
5019 for (i = 0; i < MAX_WAIT_CNT; i++) {
5020 udelay(100);
5021 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5022 break;
5023 }
5024 if (i >= MAX_WAIT_CNT) {
5025 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5026 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5027 tp->dev->name, tr32(MAC_TX_MODE));
Michael Chane6de8ad2005-05-05 14:42:41 -07005028 err |= -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029 }
5030
Michael Chane6de8ad2005-05-05 14:42:41 -07005031 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005032 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5033 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005034
5035 tw32(FTQ_RESET, 0xffffffff);
5036 tw32(FTQ_RESET, 0x00000000);
5037
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005038 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5039 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040
5041 if (tp->hw_status)
5042 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5043 if (tp->hw_stats)
5044 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5045
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 return err;
5047}
5048
5049/* tp->lock is held. */
5050static int tg3_nvram_lock(struct tg3 *tp)
5051{
5052 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5053 int i;
5054
Michael Chanec41c7d2006-01-17 02:40:55 -08005055 if (tp->nvram_lock_cnt == 0) {
5056 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5057 for (i = 0; i < 8000; i++) {
5058 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5059 break;
5060 udelay(20);
5061 }
5062 if (i == 8000) {
5063 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5064 return -ENODEV;
5065 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005067 tp->nvram_lock_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068 }
5069 return 0;
5070}
5071
5072/* tp->lock is held. */
5073static void tg3_nvram_unlock(struct tg3 *tp)
5074{
Michael Chanec41c7d2006-01-17 02:40:55 -08005075 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5076 if (tp->nvram_lock_cnt > 0)
5077 tp->nvram_lock_cnt--;
5078 if (tp->nvram_lock_cnt == 0)
5079 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5080 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081}
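/* A rough usage sketch (illustrative only): callers bracket NVRAM access
 * with the arbitration lock, which nests via nvram_lock_cnt, e.g.
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		tg3_enable_nvram_access(tp);
 *		... read or write NVRAM registers ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 *
 * tg3_chip_reset() deliberately takes the lock without a matching unlock,
 * since the reset itself drops the hardware arbitration.
 */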
5082
5083/* tp->lock is held. */
Michael Chane6af3012005-04-21 17:12:05 -07005084static void tg3_enable_nvram_access(struct tg3 *tp)
5085{
5086 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5087 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5088 u32 nvaccess = tr32(NVRAM_ACCESS);
5089
5090 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5091 }
5092}
5093
5094/* tp->lock is held. */
5095static void tg3_disable_nvram_access(struct tg3 *tp)
5096{
5097 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5098 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5099 u32 nvaccess = tr32(NVRAM_ACCESS);
5100
5101 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5102 }
5103}
5104
Matt Carlson0d3031d2007-10-10 18:02:43 -07005105static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5106{
5107 int i;
5108 u32 apedata;
5109
5110 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5111 if (apedata != APE_SEG_SIG_MAGIC)
5112 return;
5113
5114 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5115 if (apedata != APE_FW_STATUS_READY)
5116 return;
5117
5118 /* Wait for up to 1 millisecond for APE to service previous event. */
5119 for (i = 0; i < 10; i++) {
5120 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5121 return;
5122
5123 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5124
5125 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5126 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5127 event | APE_EVENT_STATUS_EVENT_PENDING);
5128
5129 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5130
5131 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5132 break;
5133
5134 udelay(100);
5135 }
5136
5137 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5138 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5139}
5140
5141static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5142{
5143 u32 event;
5144 u32 apedata;
5145
5146 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5147 return;
5148
5149 switch (kind) {
5150 case RESET_KIND_INIT:
5151 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5152 APE_HOST_SEG_SIG_MAGIC);
5153 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5154 APE_HOST_SEG_LEN_MAGIC);
5155 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5156 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5157 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5158 APE_HOST_DRIVER_ID_MAGIC);
5159 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5160 APE_HOST_BEHAV_NO_PHYLOCK);
5161
5162 event = APE_EVENT_STATUS_STATE_START;
5163 break;
5164 case RESET_KIND_SHUTDOWN:
5165 event = APE_EVENT_STATUS_STATE_UNLOAD;
5166 break;
5167 case RESET_KIND_SUSPEND:
5168 event = APE_EVENT_STATUS_STATE_SUSPEND;
5169 break;
5170 default:
5171 return;
5172 }
5173
5174 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5175
5176 tg3_ape_send_event(tp, event);
5177}
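/* tg3_ape_driver_state_change() is called from the
 * tg3_write_sig_pre_reset()/tg3_write_sig_post_reset() paths below so
 * that the APE management firmware (present on some newer tg3 devices)
 * is told about driver state transitions; it returns early when
 * TG3_FLG3_ENABLE_APE is not set.
 */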
5178
Michael Chane6af3012005-04-21 17:12:05 -07005179/* tp->lock is held. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5181{
David S. Millerf49639e2006-06-09 11:58:36 -07005182 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5183 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184
5185 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5186 switch (kind) {
5187 case RESET_KIND_INIT:
5188 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5189 DRV_STATE_START);
5190 break;
5191
5192 case RESET_KIND_SHUTDOWN:
5193 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5194 DRV_STATE_UNLOAD);
5195 break;
5196
5197 case RESET_KIND_SUSPEND:
5198 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5199 DRV_STATE_SUSPEND);
5200 break;
5201
5202 default:
5203 break;
5204 };
5205 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005206
5207 if (kind == RESET_KIND_INIT ||
5208 kind == RESET_KIND_SUSPEND)
5209 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210}
5211
5212/* tp->lock is held. */
5213static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5214{
5215 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5216 switch (kind) {
5217 case RESET_KIND_INIT:
5218 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5219 DRV_STATE_START_DONE);
5220 break;
5221
5222 case RESET_KIND_SHUTDOWN:
5223 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5224 DRV_STATE_UNLOAD_DONE);
5225 break;
5226
5227 default:
5228 break;
5229 };
5230 }
Matt Carlson0d3031d2007-10-10 18:02:43 -07005231
5232 if (kind == RESET_KIND_SHUTDOWN)
5233 tg3_ape_driver_state_change(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234}
5235
5236/* tp->lock is held. */
5237static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5238{
5239 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5240 switch (kind) {
5241 case RESET_KIND_INIT:
5242 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5243 DRV_STATE_START);
5244 break;
5245
5246 case RESET_KIND_SHUTDOWN:
5247 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5248 DRV_STATE_UNLOAD);
5249 break;
5250
5251 case RESET_KIND_SUSPEND:
5252 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5253 DRV_STATE_SUSPEND);
5254 break;
5255
5256 default:
5257 break;
5258 };
5259 }
5260}
5261
Michael Chan7a6f4362006-09-27 16:03:31 -07005262static int tg3_poll_fw(struct tg3 *tp)
5263{
5264 int i;
5265 u32 val;
5266
Michael Chanb5d37722006-09-27 16:06:21 -07005267 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Gary Zambrano0ccead12006-11-14 16:34:00 -08005268 /* Wait up to 20ms for init done. */
5269 for (i = 0; i < 200; i++) {
Michael Chanb5d37722006-09-27 16:06:21 -07005270 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5271 return 0;
Gary Zambrano0ccead12006-11-14 16:34:00 -08005272 udelay(100);
Michael Chanb5d37722006-09-27 16:06:21 -07005273 }
5274 return -ENODEV;
5275 }
5276
Michael Chan7a6f4362006-09-27 16:03:31 -07005277 /* Wait for firmware initialization to complete. */
5278 for (i = 0; i < 100000; i++) {
5279 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5280 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5281 break;
5282 udelay(10);
5283 }
5284
5285	/* The chip might not be fitted with firmware; some Sun onboard
5286	 * parts are configured like that. So don't treat the timeout
5287	 * of the above loop as an error, but do report the lack of
5288	 * running firmware once.
5289	 */
5290 if (i >= 100000 &&
5291 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5292 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5293
5294 printk(KERN_INFO PFX "%s: No firmware running.\n",
5295 tp->dev->name);
5296 }
5297
5298 return 0;
5299}
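/* The polling budgets above work out to roughly 20 ms on the 5906
 * (200 iterations of 100 usec) and about 1 second elsewhere (100000
 * iterations of 10 usec) for the bootcode to write the inverted magic
 * value back into the firmware mailbox.
 */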
5300
Michael Chanee6a99b2007-07-18 21:49:10 -07005301/* Save PCI command register before chip reset */
5302static void tg3_save_pci_state(struct tg3 *tp)
5303{
Matt Carlson8a6eac92007-10-21 16:17:55 -07005304 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005305}
5306
5307/* Restore PCI state after chip reset */
5308static void tg3_restore_pci_state(struct tg3 *tp)
5309{
5310 u32 val;
5311
5312 /* Re-enable indirect register accesses. */
5313 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5314 tp->misc_host_ctrl);
5315
5316 /* Set MAX PCI retry to zero. */
5317 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5318 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5319 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5320 val |= PCISTATE_RETRY_SAME_DMA;
Matt Carlson0d3031d2007-10-10 18:02:43 -07005321 /* Allow reads and writes to the APE register and memory space. */
5322 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5323 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5324 PCISTATE_ALLOW_APE_SHMEM_WR;
Michael Chanee6a99b2007-07-18 21:49:10 -07005325 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5326
Matt Carlson8a6eac92007-10-21 16:17:55 -07005327 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
Michael Chanee6a99b2007-07-18 21:49:10 -07005328
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005329 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5330 pcie_set_readrq(tp->pdev, 4096);
5331 else {
Michael Chan114342f2007-10-15 02:12:26 -07005332 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5333 tp->pci_cacheline_sz);
5334 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5335 tp->pci_lat_timer);
5336 }
Matt Carlson5f5c51e2007-11-12 21:19:37 -08005337
Michael Chanee6a99b2007-07-18 21:49:10 -07005338 /* Make sure PCI-X relaxed ordering bit is clear. */
Matt Carlson9974a352007-10-07 23:27:28 -07005339 if (tp->pcix_cap) {
5340 u16 pcix_cmd;
5341
5342 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5343 &pcix_cmd);
5344 pcix_cmd &= ~PCI_X_CMD_ERO;
5345 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5346 pcix_cmd);
5347 }
Michael Chanee6a99b2007-07-18 21:49:10 -07005348
5349 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
Michael Chanee6a99b2007-07-18 21:49:10 -07005350
5351		/* Chip reset on 5780 will reset the MSI enable bit,
5352		 * so we need to restore it.
5353		 */
5354 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5355 u16 ctrl;
5356
5357 pci_read_config_word(tp->pdev,
5358 tp->msi_cap + PCI_MSI_FLAGS,
5359 &ctrl);
5360 pci_write_config_word(tp->pdev,
5361 tp->msi_cap + PCI_MSI_FLAGS,
5362 ctrl | PCI_MSI_FLAGS_ENABLE);
5363 val = tr32(MSGINT_MODE);
5364 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5365 }
5366 }
5367}
5368
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369static void tg3_stop_fw(struct tg3 *);
5370
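/* Rough shape of the reset sequence below: grab the NVRAM arbiter, save
 * the PCI command register, issue the core-clock reset via GRC_MISC_CFG,
 * wait out the posted write, restore PCI/MSI/PCI-X state, re-enable the
 * memory arbiter, wait for the bootcode via tg3_poll_fw(), and finally
 * reprobe whether ASF firmware is enabled.
 */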
5371/* tp->lock is held. */
5372static int tg3_chip_reset(struct tg3 *tp)
5373{
5374 u32 val;
Michael Chan1ee582d2005-08-09 20:16:46 -07005375 void (*write_op)(struct tg3 *, u32, u32);
Michael Chan7a6f4362006-09-27 16:03:31 -07005376 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377
David S. Millerf49639e2006-06-09 11:58:36 -07005378 tg3_nvram_lock(tp);
5379
5380 /* No matching tg3_nvram_unlock() after this because
5381 * chip reset below will undo the nvram lock.
5382 */
5383 tp->nvram_lock_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384
Michael Chanee6a99b2007-07-18 21:49:10 -07005385 /* GRC_MISC_CFG core clock reset will clear the memory
5386 * enable bit in PCI register 4 and the MSI enable bit
5387 * on some chips, so we save relevant registers here.
5388 */
5389 tg3_save_pci_state(tp);
5390
Michael Chand9ab5ad2006-03-20 22:27:35 -08005391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -08005392 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07005393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07005394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chand9ab5ad2006-03-20 22:27:35 -08005396 tw32(GRC_FASTBOOT_PC, 0);
5397
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398	/*
5399	 * We must avoid the readl() that normally takes place.
5400	 * It locks up machines, causes machine checks, and does
5401	 * other fun things. So, temporarily disable the 5701
5402	 * hardware workaround while we do the reset.
5403	 */
Michael Chan1ee582d2005-08-09 20:16:46 -07005404 write_op = tp->write32;
5405 if (write_op == tg3_write_flush_reg32)
5406 tp->write32 = tg3_write32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005407
Michael Chand18edcb2007-03-24 20:57:11 -07005408 /* Prevent the irq handler from reading or writing PCI registers
5409 * during chip reset when the memory enable bit in the PCI command
5410 * register may be cleared. The chip does not generate interrupt
5411 * at this time, but the irq handler may still be called due to irq
5412 * sharing or irqpoll.
5413 */
5414 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
Michael Chanb8fa2f32007-04-06 17:35:37 -07005415 if (tp->hw_status) {
5416 tp->hw_status->status = 0;
5417 tp->hw_status->status_tag = 0;
5418 }
Michael Chand18edcb2007-03-24 20:57:11 -07005419 tp->last_tag = 0;
5420 smp_mb();
5421 synchronize_irq(tp->pdev->irq);
5422
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 /* do the reset */
5424 val = GRC_MISC_CFG_CORECLK_RESET;
5425
5426 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5427 if (tr32(0x7e2c) == 0x60) {
5428 tw32(0x7e2c, 0x20);
5429 }
5430 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5431 tw32(GRC_MISC_CFG, (1 << 29));
5432 val |= (1 << 29);
5433 }
5434 }
5435
Michael Chanb5d37722006-09-27 16:06:21 -07005436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5437 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5438 tw32(GRC_VCPU_EXT_CTRL,
5439 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5440 }
5441
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5443 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5444 tw32(GRC_MISC_CFG, val);
5445
Michael Chan1ee582d2005-08-09 20:16:46 -07005446 /* restore 5701 hardware bug workaround write method */
5447 tp->write32 = write_op;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448
5449	/* Unfortunately, we have to delay before the PCI read back.
5450	 * Some 575X chips will not even respond to a PCI cfg access
5451	 * when the reset command is given to the chip.
5452	 *
5453	 * How do these hardware designers expect things to work
5454	 * properly if the PCI write is posted for a long period
5455	 * of time? It is always necessary to have some method by
5456	 * which a register read back can occur to push out the
5457	 * write that does the reset.
5458	 *
5459	 * For most tg3 variants the trick below works.
5460	 * Ho hum...
5461	 */
5462 udelay(120);
5463
5464	/* Flush PCI posted writes. The normal MMIO registers
5465	 * are inaccessible at this time, so this is the only
5466	 * way to do this reliably (actually, this is no longer
5467	 * the case, see above). I tried using indirect
5468	 * register read/write but this upset some 5701 variants.
5469	 */
5470 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5471
5472 udelay(120);
5473
5474 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5475 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5476 int i;
5477 u32 cfg_val;
5478
5479 /* Wait for link training to complete. */
5480 for (i = 0; i < 5000; i++)
5481 udelay(100);
5482
5483 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5484 pci_write_config_dword(tp->pdev, 0xc4,
5485 cfg_val | (1 << 15));
5486 }
5487 /* Set PCIE max payload size and clear error status. */
5488 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5489 }
5490
Michael Chanee6a99b2007-07-18 21:49:10 -07005491 tg3_restore_pci_state(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005492
Michael Chand18edcb2007-03-24 20:57:11 -07005493 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5494
Michael Chanee6a99b2007-07-18 21:49:10 -07005495 val = 0;
5496 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan4cf78e42005-07-25 12:29:19 -07005497 val = tr32(MEMARB_MODE);
Michael Chanee6a99b2007-07-18 21:49:10 -07005498 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005499
5500 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5501 tg3_stop_fw(tp);
5502 tw32(0x5000, 0x400);
5503 }
5504
5505 tw32(GRC_MODE, tp->grc_mode);
5506
5507 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005508 val = tr32(0xc4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005509
5510 tw32(0xc4, val | (1 << 15));
5511 }
5512
5513 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5515 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5516 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5517 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5518 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5519 }
5520
5521 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5522 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5523 tw32_f(MAC_MODE, tp->mac_mode);
Michael Chan747e8f82005-07-25 12:33:22 -07005524 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5525 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5526 tw32_f(MAC_MODE, tp->mac_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005527 } else
5528 tw32_f(MAC_MODE, 0);
5529 udelay(40);
5530
Michael Chan7a6f4362006-09-27 16:03:31 -07005531 err = tg3_poll_fw(tp);
5532 if (err)
5533 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534
5535 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5536 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
Andy Gospodarekab0049b2007-09-06 20:42:14 +01005537 val = tr32(0x7c00);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005538
5539 tw32(0x7c00, val | (1 << 25));
5540 }
5541
5542 /* Reprobe ASF enable state. */
5543 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5544 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5545 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5546 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5547 u32 nic_cfg;
5548
5549 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5550 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5551 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -07005552 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005553 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5554 }
5555 }
5556
5557 return 0;
5558}
5559
5560/* tp->lock is held. */
5561static void tg3_stop_fw(struct tg3 *tp)
5562{
Matt Carlson0d3031d2007-10-10 18:02:43 -07005563 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5564 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565 u32 val;
Matt Carlson7c5026a2008-05-02 16:49:29 -07005566
5567 /* Wait for RX cpu to ACK the previous event. */
5568 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569
5570 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5571 val = tr32(GRC_RX_CPU_EVENT);
Matt Carlson7c5026a2008-05-02 16:49:29 -07005572 val |= GRC_RX_CPU_DRIVER_EVENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005573 tw32(GRC_RX_CPU_EVENT, val);
5574
Matt Carlson7c5026a2008-05-02 16:49:29 -07005575 /* Wait for RX cpu to ACK this event. */
5576 tg3_wait_for_event_ack(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005577 }
5578}
5579
5580/* tp->lock is held. */
Michael Chan944d9802005-05-29 14:57:48 -07005581static int tg3_halt(struct tg3 *tp, int kind, int silent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582{
5583 int err;
5584
5585 tg3_stop_fw(tp);
5586
Michael Chan944d9802005-05-29 14:57:48 -07005587 tg3_write_sig_pre_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005588
David S. Millerb3b7d6b2005-05-05 14:40:20 -07005589 tg3_abort_hw(tp, silent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005590 err = tg3_chip_reset(tp);
5591
Michael Chan944d9802005-05-29 14:57:48 -07005592 tg3_write_sig_legacy(tp, kind);
5593 tg3_write_sig_post_reset(tp, kind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005594
5595 if (err)
5596 return err;
5597
5598 return 0;
5599}
5600
5601#define TG3_FW_RELEASE_MAJOR 0x0
5602#define TG3_FW_RELASE_MINOR 0x0
5603#define TG3_FW_RELEASE_FIX 0x0
5604#define TG3_FW_START_ADDR 0x08000000
5605#define TG3_FW_TEXT_ADDR 0x08000000
5606#define TG3_FW_TEXT_LEN 0x9c0
5607#define TG3_FW_RODATA_ADDR 0x080009c0
5608#define TG3_FW_RODATA_LEN 0x60
5609#define TG3_FW_DATA_ADDR 0x08000a40
5610#define TG3_FW_DATA_LEN 0x20
5611#define TG3_FW_SBSS_ADDR 0x08000a60
5612#define TG3_FW_SBSS_LEN 0xc
5613#define TG3_FW_BSS_ADDR 0x08000a70
5614#define TG3_FW_BSS_LEN 0x10
5615
Andreas Mohr50da8592006-08-14 23:54:30 -07005616static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005617 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5618 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5619 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5620 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5621 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5622 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5623 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5624 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5625 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5626 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5627 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5628 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5629 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5630 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5631 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5632 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5633 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5634 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5635 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5636 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5637 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5638 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5639 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5640 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5641 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5642 0, 0, 0, 0, 0, 0,
5643 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5644 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5645 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5646 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5647 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5648 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5649 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5650 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5651 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5652 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5653 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5654 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5655 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5656 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5657 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5658 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5659 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5660 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5661 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5662 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5663 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5664 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5665 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5666 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5667 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5668 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5669 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5670 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5671 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5672 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5673 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5674 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5675 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5676 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5677 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5678 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5679 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5680 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5681 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5682 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5683 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5684 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5685 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5686 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5687 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5688 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5689 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5690 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5691 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5692 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5693 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5694 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5695 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5696 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5697 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5698 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5699 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5700 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5701 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5702 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5703 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5704 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5705 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5706 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5707 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5708};
5709
Andreas Mohr50da8592006-08-14 23:54:30 -07005710static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5712 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5713 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5714 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5715 0x00000000
5716};
5717
5718#if 0 /* All zeros, don't eat up space with it. */
5719u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5720 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5721 0x00000000, 0x00000000, 0x00000000, 0x00000000
5722};
5723#endif
5724
5725#define RX_CPU_SCRATCH_BASE 0x30000
5726#define RX_CPU_SCRATCH_SIZE 0x04000
5727#define TX_CPU_SCRATCH_BASE 0x34000
5728#define TX_CPU_SCRATCH_SIZE 0x04000
5729
5730/* tp->lock is held. */
5731static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5732{
5733 int i;
5734
Eric Sesterhenn5d9428d2006-04-02 13:52:48 +02005735 BUG_ON(offset == TX_CPU_BASE &&
5736 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005737
Michael Chanb5d37722006-09-27 16:06:21 -07005738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5739 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5740
5741 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5742 return 0;
5743 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744 if (offset == RX_CPU_BASE) {
5745 for (i = 0; i < 10000; i++) {
5746 tw32(offset + CPU_STATE, 0xffffffff);
5747 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5748 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5749 break;
5750 }
5751
5752 tw32(offset + CPU_STATE, 0xffffffff);
5753 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5754 udelay(10);
5755 } else {
5756 for (i = 0; i < 10000; i++) {
5757 tw32(offset + CPU_STATE, 0xffffffff);
5758 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5759 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5760 break;
5761 }
5762 }
5763
5764 if (i >= 10000) {
5765 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5766 "and %s CPU\n",
5767 tp->dev->name,
5768 (offset == RX_CPU_BASE ? "RX" : "TX"));
5769 return -ENODEV;
5770 }
Michael Chanec41c7d2006-01-17 02:40:55 -08005771
5772 /* Clear firmware's nvram arbitration. */
5773 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5774 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005775 return 0;
5776}
5777
5778struct fw_info {
5779 unsigned int text_base;
5780 unsigned int text_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07005781 const u32 *text_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005782 unsigned int rodata_base;
5783 unsigned int rodata_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07005784 const u32 *rodata_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005785 unsigned int data_base;
5786 unsigned int data_len;
Andreas Mohr50da8592006-08-14 23:54:30 -07005787 const u32 *data_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005788};
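/* tg3_load_5701_a0_firmware_fix() below shows the intended use of this
 * structure: the section bases come from the firmware image's link
 * addresses, and only their low 16 bits are used as offsets into the
 * CPU scratch memory when the words are written out.
 */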
5789
5790/* tp->lock is held. */
5791static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5792 int cpu_scratch_size, struct fw_info *info)
5793{
Michael Chanec41c7d2006-01-17 02:40:55 -08005794 int err, lock_err, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005795 void (*write_op)(struct tg3 *, u32, u32);
5796
5797 if (cpu_base == TX_CPU_BASE &&
5798 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5799 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5800 "TX cpu firmware on %s which is 5705.\n",
5801 tp->dev->name);
5802 return -EINVAL;
5803 }
5804
5805 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5806 write_op = tg3_write_mem;
5807 else
5808 write_op = tg3_write_indirect_reg32;
5809
Michael Chan1b628152005-05-29 14:59:49 -07005810	/* It is possible that the bootcode is still loading at this point.
5811	 * Get the NVRAM lock before halting the CPU.
5812	 */
Michael Chanec41c7d2006-01-17 02:40:55 -08005813 lock_err = tg3_nvram_lock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005814 err = tg3_halt_cpu(tp, cpu_base);
Michael Chanec41c7d2006-01-17 02:40:55 -08005815 if (!lock_err)
5816 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005817 if (err)
5818 goto out;
5819
5820 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5821 write_op(tp, cpu_scratch_base + i, 0);
5822 tw32(cpu_base + CPU_STATE, 0xffffffff);
5823 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5824 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5825 write_op(tp, (cpu_scratch_base +
5826 (info->text_base & 0xffff) +
5827 (i * sizeof(u32))),
5828 (info->text_data ?
5829 info->text_data[i] : 0));
5830 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5831 write_op(tp, (cpu_scratch_base +
5832 (info->rodata_base & 0xffff) +
5833 (i * sizeof(u32))),
5834 (info->rodata_data ?
5835 info->rodata_data[i] : 0));
5836 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5837 write_op(tp, (cpu_scratch_base +
5838 (info->data_base & 0xffff) +
5839 (i * sizeof(u32))),
5840 (info->data_data ?
5841 info->data_data[i] : 0));
5842
5843 err = 0;
5844
5845out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005846 return err;
5847}
5848
5849/* tp->lock is held. */
5850static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5851{
5852 struct fw_info info;
5853 int err, i;
5854
5855 info.text_base = TG3_FW_TEXT_ADDR;
5856 info.text_len = TG3_FW_TEXT_LEN;
5857 info.text_data = &tg3FwText[0];
5858 info.rodata_base = TG3_FW_RODATA_ADDR;
5859 info.rodata_len = TG3_FW_RODATA_LEN;
5860 info.rodata_data = &tg3FwRodata[0];
5861 info.data_base = TG3_FW_DATA_ADDR;
5862 info.data_len = TG3_FW_DATA_LEN;
5863 info.data_data = NULL;
5864
5865 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5866 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5867 &info);
5868 if (err)
5869 return err;
5870
5871 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5872 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5873 &info);
5874 if (err)
5875 return err;
5876
5877 /* Now startup only the RX cpu. */
5878 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5879 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5880
5881 for (i = 0; i < 5; i++) {
5882 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5883 break;
5884 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5885 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5886 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5887 udelay(1000);
5888 }
5889 if (i >= 5) {
5890 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5891 "to set RX CPU PC, is %08x should be %08x\n",
5892 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5893 TG3_FW_TEXT_ADDR);
5894 return -ENODEV;
5895 }
5896 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5897 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5898
5899 return 0;
5900}
5901
Linus Torvalds1da177e2005-04-16 15:20:36 -07005902
5903#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5904#define TG3_TSO_FW_RELASE_MINOR 0x6
5905#define TG3_TSO_FW_RELEASE_FIX 0x0
5906#define TG3_TSO_FW_START_ADDR 0x08000000
5907#define TG3_TSO_FW_TEXT_ADDR 0x08000000
5908#define TG3_TSO_FW_TEXT_LEN 0x1aa0
5909#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5910#define TG3_TSO_FW_RODATA_LEN 0x60
5911#define TG3_TSO_FW_DATA_ADDR 0x08001b20
5912#define TG3_TSO_FW_DATA_LEN 0x30
5913#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5914#define TG3_TSO_FW_SBSS_LEN 0x2c
5915#define TG3_TSO_FW_BSS_ADDR 0x08001b80
5916#define TG3_TSO_FW_BSS_LEN 0x894
5917
Andreas Mohr50da8592006-08-14 23:54:30 -07005918static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005919 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5920 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5921 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5922 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5923 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5924 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5925 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5926 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5927 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5928 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5929 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5930 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5931 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5932 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5933 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5934 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5935 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5936 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5937 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5938 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5939 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5940 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5941 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5942 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5943 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5944 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5945 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5946 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5947 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5948 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5949 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5950 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5951 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5952 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5953 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5954 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5955 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5956 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5957 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5958 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5959 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5960 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5961 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5962 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5963 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5964 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5965 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5966 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5967 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5968 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5969 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5970 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5971 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5972 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5973 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5974 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5975 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5976 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5977 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5978 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5979 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5980 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5981 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5982 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5983 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5984 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5985 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5986 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5987 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5988 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5989 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5990 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5991 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5992 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5993 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5994 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5995 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5996 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5997 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5998 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5999 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6000 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6001 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6002 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6003 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6004 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6005 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6006 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6007 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6008 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6009 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6010 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6011 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6012 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6013 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6014 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6015 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6016 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6017 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6018 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6019 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6020 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6021 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6022 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6023 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6024 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6025 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6026 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6027 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6028 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6029 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6030 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6031 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6032 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6033 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6034 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6035 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6036 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6037 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6038 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6039 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6040 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6041 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6042 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6043 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6044 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6045 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6046 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6047 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6048 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6049 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6050 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6051 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6052 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6053 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6054 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6055 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6056 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6057 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6058 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6059 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6060 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6061 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6062 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6063 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6064 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6065 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6066 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6067 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6068 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6069 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6070 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6071 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6072 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6073 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6074 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6075 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6076 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6077 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6078 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6079 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6080 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6081 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6082 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6083 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6084 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6085 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6086 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6087 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6088 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6089 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6090 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6091 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6092 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6093 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6094 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6095 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6096 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6097 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6098 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6099 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6100 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6101 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6102 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6103 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6104 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6105 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6106 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6107 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6108 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6109 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6110 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6111 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6112 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6113 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6114 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6115 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6116 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6117 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6118 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6119 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6120 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6121 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6122 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6123 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6124 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6125 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6126 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6127 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6128 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6129 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6130 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6131 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6132 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6133 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6134 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6135 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6136 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6137 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6138 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6139 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6140 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6141 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6142 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6143 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6144 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6145 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6146 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6147 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6148 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6149 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6150 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6151 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6152 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6153 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6154 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6155 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6156 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6157 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6158 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6159 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6160 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6161 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6162 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6163 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6164 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6165 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6166 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6167 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6168 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6169 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6170 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6171 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6172 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6173 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6174 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6175 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6176 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6177 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6178 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6179 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6180 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6181 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6182 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6183 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6184 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6185 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6186 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6187 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6188 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6189 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6190 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6191 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6192 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6193 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6194 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6195 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6196 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6197 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6198 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6199 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6200 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6201 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6202 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6203};
6204
Andreas Mohr50da8592006-08-14 23:54:30 -07006205static const u32 tg3TsoFwRodata[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006206 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6207 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6208 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6209 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6210 0x00000000,
6211};
6212
Andreas Mohr50da8592006-08-14 23:54:30 -07006213static const u32 tg3TsoFwData[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006214 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6215 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6216 0x00000000,
6217};
6218
6219/* 5705 needs a special version of the TSO firmware. */
6220#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6221#define TG3_TSO5_FW_RELEASE_MINOR 0x2
6222#define TG3_TSO5_FW_RELEASE_FIX 0x0
6223#define TG3_TSO5_FW_START_ADDR 0x00010000
6224#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6225#define TG3_TSO5_FW_TEXT_LEN 0xe90
6226#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6227#define TG3_TSO5_FW_RODATA_LEN 0x50
6228#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6229#define TG3_TSO5_FW_DATA_LEN 0x20
6230#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6231#define TG3_TSO5_FW_SBSS_LEN 0x28
6232#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6233#define TG3_TSO5_FW_BSS_LEN 0x88
6234
Andreas Mohr50da8592006-08-14 23:54:30 -07006235static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006236 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6237 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6238 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6239 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6240 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6241 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6242 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6243 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6244 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6245 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6246 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6247 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6248 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6249 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6250 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6251 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6252 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6253 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6254 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6255 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6256 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6257 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6258 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6259 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6260 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6261 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6262 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6263 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6264 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6265 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6266 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6267 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6268 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6269 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6270 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6271 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6272 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6273 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6274 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6275 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6276 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6277 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6278 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6279 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6280 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6281 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6282 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6283 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6284 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6285 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6286 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6287 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6288 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6289 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6290 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6291 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6292 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6293 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6294 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6295 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6296 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6297 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6298 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6299 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6300 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6301 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6302 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6303 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6304 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6305 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6306 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6307 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6308 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6309 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6310 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6311 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6312 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6313 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6314 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6315 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6316 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6317 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6318 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6319 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6320 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6321 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6322 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6323 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6324 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6325 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6326 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6327 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6328 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6329 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6330 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6331 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6332 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6333 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6334 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6335 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6336 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6337 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6338 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6339 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6340 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6341 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6342 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6343 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6344 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6345 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6346 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6347 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6348 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6349 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6350 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6351 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6352 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6353 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6354 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6355 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6356 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6357 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6358 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6359 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6360 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6361 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6362 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6363 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6364 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6365 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6366 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6367 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6368 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6369 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6370 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6371 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6372 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6373 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6374 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6375 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6376 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6377 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6378 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6379 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6380 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6381 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6382 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6383 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6384 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6385 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6386 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6387 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6388 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6389 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6390 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6391 0x00000000, 0x00000000, 0x00000000,
6392};
6393
Andreas Mohr50da8592006-08-14 23:54:30 -07006394static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006395 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6396 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6397 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6398 0x00000000, 0x00000000, 0x00000000,
6399};
6400
Andreas Mohr50da8592006-08-14 23:54:30 -07006401static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006402 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6403 0x00000000, 0x00000000, 0x00000000,
6404};
6405
6406/* tp->lock is held. */
6407static int tg3_load_tso_firmware(struct tg3 *tp)
6408{
6409 struct fw_info info;
6410 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6411 int err, i;
6412
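	/* Chips with TSO fully in hardware (TG3_FLG2_HW_TSO) do not need
	 * this firmware download at all.
	 */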
6413 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6414 return 0;
6415
6416 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6417 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6418 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6419 info.text_data = &tg3Tso5FwText[0];
6420 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6421 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6422 info.rodata_data = &tg3Tso5FwRodata[0];
6423 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6424 info.data_len = TG3_TSO5_FW_DATA_LEN;
6425 info.data_data = &tg3Tso5FwData[0];
6426 cpu_base = RX_CPU_BASE;
6427 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6428 cpu_scratch_size = (info.text_len +
6429 info.rodata_len +
6430 info.data_len +
6431 TG3_TSO5_FW_SBSS_LEN +
6432 TG3_TSO5_FW_BSS_LEN);
6433 } else {
6434 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6435 info.text_len = TG3_TSO_FW_TEXT_LEN;
6436 info.text_data = &tg3TsoFwText[0];
6437 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6438 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6439 info.rodata_data = &tg3TsoFwRodata[0];
6440 info.data_base = TG3_TSO_FW_DATA_ADDR;
6441 info.data_len = TG3_TSO_FW_DATA_LEN;
6442 info.data_data = &tg3TsoFwData[0];
6443 cpu_base = TX_CPU_BASE;
6444 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6445 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6446 }
6447
6448 err = tg3_load_firmware_cpu(tp, cpu_base,
6449 cpu_scratch_base, cpu_scratch_size,
6450 &info);
6451 if (err)
6452 return err;
6453
6454 /* Now start up the CPU. */
6455 tw32(cpu_base + CPU_STATE, 0xffffffff);
6456 tw32_f(cpu_base + CPU_PC, info.text_base);
6457
6458 for (i = 0; i < 5; i++) {
6459 if (tr32(cpu_base + CPU_PC) == info.text_base)
6460 break;
6461 tw32(cpu_base + CPU_STATE, 0xffffffff);
6462 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6463 tw32_f(cpu_base + CPU_PC, info.text_base);
6464 udelay(1000);
6465 }
6466 if (i >= 5) {
6467 printk(KERN_ERR PFX "tg3_load_tso_firmware failed for %s "
6468 "to set CPU PC: is %08x, should be %08x\n",
6469 tp->dev->name, tr32(cpu_base + CPU_PC),
6470 info.text_base);
6471 return -ENODEV;
6472 }
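	/* Clear the CPU state one final time and write zero to CPU_MODE to
	 * drop the halt bit, letting the CPU start executing the image at
	 * info.text_base.
	 */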
6473 tw32(cpu_base + CPU_STATE, 0xffffffff);
6474 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6475 return 0;
6476}
6477
Linus Torvalds1da177e2005-04-16 15:20:36 -07006478
6479/* tp->lock is held. */
Michael Chan986e0ae2007-05-05 12:10:20 -07006480static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006481{
6482 u32 addr_high, addr_low;
6483 int i;
6484
6485 addr_high = ((tp->dev->dev_addr[0] << 8) |
6486 tp->dev->dev_addr[1]);
6487 addr_low = ((tp->dev->dev_addr[2] << 24) |
6488 (tp->dev->dev_addr[3] << 16) |
6489 (tp->dev->dev_addr[4] << 8) |
6490 (tp->dev->dev_addr[5] << 0));
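	/* The first two MAC address bytes land in the HIGH register and the
	 * last four in the LOW register; e.g. a hypothetical address
	 * 00:10:18:aa:bb:cc yields addr_high = 0x0010, addr_low = 0x18aabbcc.
	 */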
6491 for (i = 0; i < 4; i++) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006492 if (i == 1 && skip_mac_1)
6493 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006494 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6495 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6496 }
6497
6498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6499 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6500 for (i = 0; i < 12; i++) {
6501 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6502 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6503 }
6504 }
6505
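	/* Use the (masked) sum of the MAC address bytes as the transmit
	 * backoff seed, so the seed differs from NIC to NIC.
	 */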
6506 addr_high = (tp->dev->dev_addr[0] +
6507 tp->dev->dev_addr[1] +
6508 tp->dev->dev_addr[2] +
6509 tp->dev->dev_addr[3] +
6510 tp->dev->dev_addr[4] +
6511 tp->dev->dev_addr[5]) &
6512 TX_BACKOFF_SEED_MASK;
6513 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6514}
6515
6516static int tg3_set_mac_addr(struct net_device *dev, void *p)
6517{
6518 struct tg3 *tp = netdev_priv(dev);
6519 struct sockaddr *addr = p;
Michael Chan986e0ae2007-05-05 12:10:20 -07006520 int err = 0, skip_mac_1 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006521
Michael Chanf9804dd2005-09-27 12:13:10 -07006522 if (!is_valid_ether_addr(addr->sa_data))
6523 return -EINVAL;
6524
Linus Torvalds1da177e2005-04-16 15:20:36 -07006525 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6526
Michael Chane75f7c92006-03-20 21:33:26 -08006527 if (!netif_running(dev))
6528 return 0;
6529
Michael Chan58712ef2006-04-29 18:58:01 -07006530 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
Michael Chan986e0ae2007-05-05 12:10:20 -07006531 u32 addr0_high, addr0_low, addr1_high, addr1_low;
Michael Chan58712ef2006-04-29 18:58:01 -07006532
Michael Chan986e0ae2007-05-05 12:10:20 -07006533 addr0_high = tr32(MAC_ADDR_0_HIGH);
6534 addr0_low = tr32(MAC_ADDR_0_LOW);
6535 addr1_high = tr32(MAC_ADDR_1_HIGH);
6536 addr1_low = tr32(MAC_ADDR_1_LOW);
6537
6538 /* Skip MAC addr 1 if ASF is using it. */
6539 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6540 !(addr1_high == 0 && addr1_low == 0))
6541 skip_mac_1 = 1;
Michael Chan58712ef2006-04-29 18:58:01 -07006542 }
Michael Chan986e0ae2007-05-05 12:10:20 -07006543 spin_lock_bh(&tp->lock);
6544 __tg3_set_mac_addr(tp, skip_mac_1);
6545 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546
Michael Chanb9ec6c12006-07-25 16:37:27 -07006547 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006548}
6549
6550/* tp->lock is held. */
6551static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6552 dma_addr_t mapping, u32 maxlen_flags,
6553 u32 nic_addr)
6554{
6555 tg3_write_mem(tp,
6556 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6557 ((u64) mapping >> 32));
6558 tg3_write_mem(tp,
6559 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6560 ((u64) mapping & 0xffffffff));
6561 tg3_write_mem(tp,
6562 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6563 maxlen_flags);
6564
6565 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6566 tg3_write_mem(tp,
6567 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6568 nic_addr);
6569}
6570
6571static void __tg3_set_rx_mode(struct net_device *);
Michael Chand244c892005-07-05 14:42:33 -07006572static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
David S. Miller15f98502005-05-18 22:49:26 -07006573{
6574 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6575 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6576 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6577 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6578 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6579 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6580 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6581 }
6582 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6583 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6584 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6585 u32 val = ec->stats_block_coalesce_usecs;
6586
6587 if (!netif_carrier_ok(tp->dev))
6588 val = 0;
6589
6590 tw32(HOSTCC_STAT_COAL_TICKS, val);
6591 }
6592}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593
6594/* tp->lock is held. */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07006595static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006596{
6597 u32 val, rdmac_mode;
6598 int i, err, limit;
6599
6600 tg3_disable_ints(tp);
6601
6602 tg3_stop_fw(tp);
6603
6604 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6605
6606 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
Michael Chane6de8ad2005-05-05 14:42:41 -07006607 tg3_abort_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006608 }
6609
Michael Chan36da4d82006-11-03 01:01:03 -08006610 if (reset_phy)
Michael Chand4d2c552006-03-20 17:47:20 -08006611 tg3_phy_reset(tp);
6612
Linus Torvalds1da177e2005-04-16 15:20:36 -07006613 err = tg3_chip_reset(tp);
6614 if (err)
6615 return err;
6616
6617 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6618
Matt Carlsonb5af7122007-11-12 21:22:02 -08006619 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6620 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07006621 val = tr32(TG3_CPMU_CTRL);
6622 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6623 tw32(TG3_CPMU_CTRL, val);
Matt Carlson9acb9612007-11-12 21:10:06 -08006624
6625 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6626 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6627 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6628 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6629
6630 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6631 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6632 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6633 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6634
6635 val = tr32(TG3_CPMU_HST_ACC);
6636 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6637 val |= CPMU_HST_ACC_MACCLK_6_25;
6638 tw32(TG3_CPMU_HST_ACC, val);
Matt Carlsond30cdd22007-10-07 23:28:35 -07006639 }
6640
Linus Torvalds1da177e2005-04-16 15:20:36 -07006641 /* This works around an issue with Athlon chipsets on
6642 * B3 tigon3 silicon. This bit has no effect on any
6643 * other revision. But do not set this on PCI Express
Matt Carlson795d01c2007-10-07 23:28:17 -07006644 * chips and don't even touch the clocks if the CPMU is present.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006645 */
Matt Carlson795d01c2007-10-07 23:28:17 -07006646 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6647 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6648 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6649 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6650 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006651
6652 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6653 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6654 val = tr32(TG3PCI_PCISTATE);
6655 val |= PCISTATE_RETRY_SAME_DMA;
6656 tw32(TG3PCI_PCISTATE, val);
6657 }
6658
Matt Carlson0d3031d2007-10-10 18:02:43 -07006659 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6660 /* Allow reads and writes to the
6661 * APE register and memory space.
6662 */
6663 val = tr32(TG3PCI_PCISTATE);
6664 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6665 PCISTATE_ALLOW_APE_SHMEM_WR;
6666 tw32(TG3PCI_PCISTATE, val);
6667 }
6668
Linus Torvalds1da177e2005-04-16 15:20:36 -07006669 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6670 /* Enable some hw fixes. */
6671 val = tr32(TG3PCI_MSI_DATA);
6672 val |= (1 << 26) | (1 << 28) | (1 << 29);
6673 tw32(TG3PCI_MSI_DATA, val);
6674 }
6675
6676 /* Descriptor ring init may make accesses to the
6677 * NIC SRAM area to setup the TX descriptors, so we
6678 * can only do this after the hardware has been
6679 * successfully reset.
6680 */
Michael Chan32d8c572006-07-25 16:38:29 -07006681 err = tg3_init_rings(tp);
6682 if (err)
6683 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006684
Matt Carlson9936bcf2007-10-10 18:03:07 -07006685 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6686 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -07006687 /* This value is determined during the probe time DMA
6688 * engine test, tg3_test_dma.
6689 */
6690 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6691 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006692
6693 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6694 GRC_MODE_4X_NIC_SEND_RINGS |
6695 GRC_MODE_NO_TX_PHDR_CSUM |
6696 GRC_MODE_NO_RX_PHDR_CSUM);
6697 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
Michael Chand2d746f2006-04-06 21:45:39 -07006698
6699 /* Pseudo-header checksum is done by hardware logic and not
6700 * the offload processers, so make the chip do the pseudo-
6701 * header checksums on receive. For transmit it is more
6702 * convenient to do the pseudo-header checksum in software
6703 * as Linux does that on transmit for us in all cases.
6704 */
6705 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006706
6707 tw32(GRC_MODE,
6708 tp->grc_mode |
6709 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6710
6711 /* Setup the timer prescaler register. The clock is always 66 MHz. */
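	/* A prescaler value of 65 presumably means divide-by-(65 + 1),
	 * i.e. roughly 1 MHz timer ticks from the 66 MHz clock.
	 */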
6712 val = tr32(GRC_MISC_CFG);
6713 val &= ~0xff;
6714 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6715 tw32(GRC_MISC_CFG, val);
6716
6717 /* Initialize MBUF/DESC pool. */
John W. Linvillecbf46852005-04-21 17:01:29 -07006718 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006719 /* Do nothing. */
6720 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6721 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6723 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6724 else
6725 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6726 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6727 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6728 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006729 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6730 int fw_len;
6731
6732 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6733 TG3_TSO5_FW_RODATA_LEN +
6734 TG3_TSO5_FW_DATA_LEN +
6735 TG3_TSO5_FW_SBSS_LEN +
6736 TG3_TSO5_FW_BSS_LEN);
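		/* Round the firmware footprint up to a 128-byte boundary;
		 * the 5705 MBUF pool is then placed just past it and its
		 * size reduced to match.
		 */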
6737 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6738 tw32(BUFMGR_MB_POOL_ADDR,
6739 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6740 tw32(BUFMGR_MB_POOL_SIZE,
6741 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006743
Michael Chan0f893dc2005-07-25 12:30:38 -07006744 if (tp->dev->mtu <= ETH_DATA_LEN) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006745 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6746 tp->bufmgr_config.mbuf_read_dma_low_water);
6747 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6748 tp->bufmgr_config.mbuf_mac_rx_low_water);
6749 tw32(BUFMGR_MB_HIGH_WATER,
6750 tp->bufmgr_config.mbuf_high_water);
6751 } else {
6752 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6753 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6754 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6755 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6756 tw32(BUFMGR_MB_HIGH_WATER,
6757 tp->bufmgr_config.mbuf_high_water_jumbo);
6758 }
6759 tw32(BUFMGR_DMA_LOW_WATER,
6760 tp->bufmgr_config.dma_low_water);
6761 tw32(BUFMGR_DMA_HIGH_WATER,
6762 tp->bufmgr_config.dma_high_water);
6763
6764 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6765 for (i = 0; i < 2000; i++) {
6766 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6767 break;
6768 udelay(10);
6769 }
6770 if (i >= 2000) {
6771 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6772 tp->dev->name);
6773 return -ENODEV;
6774 }
6775
6776 /* Setup replenish threshold. */
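	/* The threshold is one eighth of the configured standard ring size,
	 * clamped to at least 1 and at most rx_std_max_post (and to half of
	 * the internal ring on the 5906).
	 */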
Michael Chanf92905d2006-06-29 20:14:29 -07006777 val = tp->rx_pending / 8;
6778 if (val == 0)
6779 val = 1;
6780 else if (val > tp->rx_std_max_post)
6781 val = tp->rx_std_max_post;
Michael Chanb5d37722006-09-27 16:06:21 -07006782 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6783 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6784 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6785
6786 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6787 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6788 }
Michael Chanf92905d2006-06-29 20:14:29 -07006789
6790 tw32(RCVBDI_STD_THRESH, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006791
6792 /* Initialize TG3_BDINFO's at:
6793 * RCVDBDI_STD_BD: standard eth size rx ring
6794 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6795 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6796 *
6797 * like so:
6798 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6799 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6800 * ring attribute flags
6801 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6802 *
6803 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6804 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6805 *
6806 * The size of each ring is fixed in the firmware, but the location is
6807 * configurable.
6808 */
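	/* Viewed as a structure, each TG3_BDINFO block is laid out roughly
	 * like this (offsets per the TG3_BDINFO_* constants):
	 *
	 *	u32 host_addr_high;	TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH
	 *	u32 host_addr_low;	TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW
	 *	u32 maxlen_flags;	TG3_BDINFO_MAXLEN_FLAGS
	 *	u32 nic_addr;		TG3_BDINFO_NIC_ADDR
	 */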
6809 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6810 ((u64) tp->rx_std_mapping >> 32));
6811 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6812 ((u64) tp->rx_std_mapping & 0xffffffff));
6813 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6814 NIC_SRAM_RX_BUFFER_DESC);
6815
6816 /* Don't even try to program the JUMBO/MINI buffer descriptor
6817 * configs on 5705.
6818 */
6819 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6820 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6821 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6822 } else {
6823 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6824 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6825
6826 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6827 BDINFO_FLAGS_DISABLED);
6828
6829 /* Setup replenish threshold. */
6830 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6831
Michael Chan0f893dc2005-07-25 12:30:38 -07006832 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006833 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6834 ((u64) tp->rx_jumbo_mapping >> 32));
6835 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6836 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6837 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6838 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6839 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6840 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6841 } else {
6842 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6843 BDINFO_FLAGS_DISABLED);
6844 }
6845
6846 }
6847
6848 /* There is only one send ring on 5705/5750, no need to explicitly
6849 * disable the others.
6850 */
6851 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6852 /* Clear out send RCB ring in SRAM. */
6853 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6854 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6855 BDINFO_FLAGS_DISABLED);
6856 }
6857
6858 tp->tx_prod = 0;
6859 tp->tx_cons = 0;
6860 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6861 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6862
6863 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6864 tp->tx_desc_mapping,
6865 (TG3_TX_RING_SIZE <<
6866 BDINFO_FLAGS_MAXLEN_SHIFT),
6867 NIC_SRAM_TX_BUFFER_DESC);
6868
6869 /* There is only one receive return ring on 5705/5750, no need
6870 * to explicitly disable the others.
6871 */
6872 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6873 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6874 i += TG3_BDINFO_SIZE) {
6875 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6876 BDINFO_FLAGS_DISABLED);
6877 }
6878 }
6879
6880 tp->rx_rcb_ptr = 0;
6881 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6882
6883 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6884 tp->rx_rcb_mapping,
6885 (TG3_RX_RCB_RING_SIZE(tp) <<
6886 BDINFO_FLAGS_MAXLEN_SHIFT),
6887 0);
6888
6889 tp->rx_std_ptr = tp->rx_pending;
6890 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6891 tp->rx_std_ptr);
6892
Michael Chan0f893dc2005-07-25 12:30:38 -07006893 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07006894 tp->rx_jumbo_pending : 0;
6895 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6896 tp->rx_jumbo_ptr);
6897
6898 /* Initialize MAC address and backoff seed. */
Michael Chan986e0ae2007-05-05 12:10:20 -07006899 __tg3_set_mac_addr(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006900
6901 /* MTU + ethernet header + FCS + optional VLAN tag */
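	/* ETH_HLEN (14) is the Ethernet header; the extra 8 bytes are 4 for
	 * the FCS plus 4 for an optional VLAN tag.
	 */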
6902 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6903
6904 /* The slot time is changed by tg3_setup_phy if we
6905 * run at gigabit with half duplex.
6906 */
6907 tw32(MAC_TX_LENGTHS,
6908 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6909 (6 << TX_LENGTHS_IPG_SHIFT) |
6910 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6911
6912 /* Receive rules. */
6913 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6914 tw32(RCVLPC_CONFIG, 0x0181);
6915
6916 /* Calculate the RDMAC_MODE setting early; we need it to determine
6917 * the RCVLPC_STATS_ENABLE mask.
6918 */
6919 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6920 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6921 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6922 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6923 RDMAC_MODE_LNGREAD_ENAB);
Michael Chan85e94ce2005-04-21 17:05:28 -07006924
Matt Carlsond30cdd22007-10-07 23:28:35 -07006925 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6926 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6927 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6928 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6929
Michael Chan85e94ce2005-04-21 17:05:28 -07006930 /* If statement applies to 5705 and 5750 PCI devices only */
6931 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6932 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6933 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006934 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
Matt Carlsonc13e3712007-05-05 11:50:04 -07006935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006936 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6937 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6938 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6939 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6940 }
6941 }
6942
Michael Chan85e94ce2005-04-21 17:05:28 -07006943 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6944 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6945
Linus Torvalds1da177e2005-04-16 15:20:36 -07006946 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6947 rdmac_mode |= (1 << 27);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006948
6949 /* Receive/send statistics. */
Michael Chan16613942006-06-29 20:15:13 -07006950 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6951 val = tr32(RCVLPC_STATS_ENABLE);
6952 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6953 tw32(RCVLPC_STATS_ENABLE, val);
6954 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6955 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006956 val = tr32(RCVLPC_STATS_ENABLE);
6957 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6958 tw32(RCVLPC_STATS_ENABLE, val);
6959 } else {
6960 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6961 }
6962 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6963 tw32(SNDDATAI_STATSENAB, 0xffffff);
6964 tw32(SNDDATAI_STATSCTRL,
6965 (SNDDATAI_SCTRL_ENABLE |
6966 SNDDATAI_SCTRL_FASTUPD));
6967
6968 /* Setup host coalescing engine. */
6969 tw32(HOSTCC_MODE, 0);
6970 for (i = 0; i < 2000; i++) {
6971 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6972 break;
6973 udelay(10);
6974 }
6975
Michael Chand244c892005-07-05 14:42:33 -07006976 __tg3_set_coalesce(tp, &tp->coal);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006977
6978 /* set status block DMA address */
6979 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6980 ((u64) tp->status_mapping >> 32));
6981 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6982 ((u64) tp->status_mapping & 0xffffffff));
6983
6984 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6985 /* Status/statistics block address. See tg3_timer,
6986 * the tg3_periodic_fetch_stats call there, and
6987 * tg3_get_stats to see how this works for 5705/5750 chips.
6988 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006989 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6990 ((u64) tp->stats_mapping >> 32));
6991 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6992 ((u64) tp->stats_mapping & 0xffffffff));
6993 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6994 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6995 }
6996
6997 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6998
6999 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7000 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7001 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7002 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7003
7004 /* Clear statistics/status block in chip, and status block in ram. */
7005 for (i = NIC_SRAM_STATS_BLK;
7006 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7007 i += sizeof(u32)) {
7008 tg3_write_mem(tp, i, 0);
7009 udelay(40);
7010 }
7011 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7012
Michael Chanc94e3942005-09-27 12:12:42 -07007013 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7014 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7015 /* reset to prevent losing 1st rx packet intermittently */
7016 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7017 udelay(10);
7018 }
7019
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7021 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07007022 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7023 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7024 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7025 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007026 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7027 udelay(40);
7028
Michael Chan314fba32005-04-21 17:07:04 -07007029 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
Michael Chan9d26e212006-12-07 00:21:14 -08007030 * If TG3_FLG2_IS_NIC is zero, we should read the
Michael Chan314fba32005-04-21 17:07:04 -07007031 * register to preserve the GPIO settings for LOMs. The GPIOs,
7032 * whether used as inputs or outputs, are set by boot code after
7033 * reset.
7034 */
Michael Chan9d26e212006-12-07 00:21:14 -08007035 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
Michael Chan314fba32005-04-21 17:07:04 -07007036 u32 gpio_mask;
7037
Michael Chan9d26e212006-12-07 00:21:14 -08007038 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7039 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7040 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
Michael Chan3e7d83b2005-04-21 17:10:36 -07007041
7042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7043 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7044 GRC_LCLCTRL_GPIO_OUTPUT3;
7045
Michael Chanaf36e6b2006-03-23 01:28:06 -08007046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7047 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7048
Gary Zambranoaaf84462007-05-05 11:51:45 -07007049 tp->grc_local_ctrl &= ~gpio_mask;
Michael Chan314fba32005-04-21 17:07:04 -07007050 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7051
7052 /* GPIO1 must be driven high for eeprom write protect */
Michael Chan9d26e212006-12-07 00:21:14 -08007053 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7054 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7055 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan314fba32005-04-21 17:07:04 -07007056 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007057 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7058 udelay(100);
7059
Michael Chan09ee9292005-08-09 20:17:00 -07007060 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
David S. Millerfac9b832005-05-18 22:46:34 -07007061 tp->last_tag = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007062
7063 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7064 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7065 udelay(40);
7066 }
7067
7068 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7069 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7070 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7071 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7072 WDMAC_MODE_LNGREAD_ENAB);
7073
Michael Chan85e94ce2005-04-21 17:05:28 -07007074 /* If statement applies to 5705 and 5750 PCI devices only */
7075 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7076 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7077 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007078 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7079 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7080 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7081 /* nothing */
7082 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7083 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7084 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7085 val |= WDMAC_MODE_RX_ACCEL;
7086 }
7087 }
7088
Michael Chand9ab5ad2006-03-20 22:27:35 -08007089 /* Enable host coalescing bug fix */
Michael Chanaf36e6b2006-03-23 01:28:06 -08007090 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07007091 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07007092 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7093 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
Michael Chand9ab5ad2006-03-20 22:27:35 -08007094 val |= (1 << 29);
7095
Linus Torvalds1da177e2005-04-16 15:20:36 -07007096 tw32_f(WDMAC_MODE, val);
7097 udelay(40);
7098
Matt Carlson9974a352007-10-07 23:27:28 -07007099 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7100 u16 pcix_cmd;
7101
7102 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7103 &pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
Matt Carlson9974a352007-10-07 23:27:28 -07007105 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7106 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007107 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
Matt Carlson9974a352007-10-07 23:27:28 -07007108 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7109 pcix_cmd |= PCI_X_CMD_READ_2K;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007110 }
Matt Carlson9974a352007-10-07 23:27:28 -07007111 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7112 pcix_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007113 }
7114
7115 tw32_f(RDMAC_MODE, rdmac_mode);
7116 udelay(40);
7117
7118 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7119 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7120 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
Matt Carlson9936bcf2007-10-10 18:03:07 -07007121
7122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7123 tw32(SNDDATAC_MODE,
7124 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7125 else
7126 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7127
Linus Torvalds1da177e2005-04-16 15:20:36 -07007128 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7129 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7130 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7131 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007132 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7133 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007134 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7135 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7136
7137 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7138 err = tg3_load_5701_a0_firmware_fix(tp);
7139 if (err)
7140 return err;
7141 }
7142
Linus Torvalds1da177e2005-04-16 15:20:36 -07007143 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7144 err = tg3_load_tso_firmware(tp);
7145 if (err)
7146 return err;
7147 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007148
7149 tp->tx_mode = TX_MODE_ENABLE;
7150 tw32_f(MAC_TX_MODE, tp->tx_mode);
7151 udelay(100);
7152
7153 tp->rx_mode = RX_MODE_ENABLE;
Matt Carlson9936bcf2007-10-10 18:03:07 -07007154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chanaf36e6b2006-03-23 01:28:06 -08007156 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7157
Linus Torvalds1da177e2005-04-16 15:20:36 -07007158 tw32_f(MAC_RX_MODE, tp->rx_mode);
7159 udelay(10);
7160
7161 if (tp->link_config.phy_is_low_power) {
7162 tp->link_config.phy_is_low_power = 0;
7163 tp->link_config.speed = tp->link_config.orig_speed;
7164 tp->link_config.duplex = tp->link_config.orig_duplex;
7165 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7166 }
7167
Matt Carlson8ef21422008-05-02 16:47:53 -07007168 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007169 tw32_f(MAC_MI_MODE, tp->mi_mode);
7170 udelay(80);
7171
7172 tw32(MAC_LED_CTRL, tp->led_ctrl);
7173
7174 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
Michael Chanc94e3942005-09-27 12:12:42 -07007175 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7177 udelay(10);
7178 }
7179 tw32_f(MAC_RX_MODE, tp->rx_mode);
7180 udelay(10);
7181
7182 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7183 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7184 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7185 /* Set drive transmission level to 1.2V */
7186 /* only if the signal pre-emphasis bit is not set */
7187 val = tr32(MAC_SERDES_CFG);
7188 val &= 0xfffff000;
7189 val |= 0x880;
7190 tw32(MAC_SERDES_CFG, val);
7191 }
7192 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7193 tw32(MAC_SERDES_CFG, 0x616000);
7194 }
7195
7196 /* Prevent chip from dropping frames when flow control
7197 * is enabled.
7198 */
7199 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7200
7201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7202 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7203 /* Use hardware link auto-negotiation */
7204 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7205 }
7206
Michael Chand4d2c552006-03-20 17:47:20 -08007207 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7208 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7209 u32 tmp;
7210
7211 tmp = tr32(SERDES_RX_CTRL);
7212 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7213 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7214 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7215 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7216 }
7217
Michael Chan36da4d82006-11-03 01:01:03 -08007218 err = tg3_setup_phy(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007219 if (err)
7220 return err;
7221
Michael Chan715116a2006-09-27 16:09:25 -07007222 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7223 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007224 u32 tmp;
7225
7226 /* Clear CRC stats. */
Michael Chan569a5df2007-02-13 12:18:15 -08007227 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7228 tg3_writephy(tp, MII_TG3_TEST1,
7229 tmp | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007230 tg3_readphy(tp, 0x14, &tmp);
7231 }
7232 }
7233
7234 __tg3_set_rx_mode(tp->dev);
7235
7236 /* Initialize receive rules. */
7237 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7238 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7239 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7240 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7241
Michael Chan4cf78e42005-07-25 12:29:19 -07007242 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
Michael Chana4e2b342005-10-26 15:46:52 -07007243 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007244 limit = 8;
7245 else
7246 limit = 16;
7247 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7248 limit -= 4;
7249 switch (limit) {
7250 case 16:
7251 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7252 case 15:
7253 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7254 case 14:
7255 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7256 case 13:
7257 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7258 case 12:
7259 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7260 case 11:
7261 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7262 case 10:
7263 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7264 case 9:
7265 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7266 case 8:
7267 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7268 case 7:
7269 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7270 case 6:
7271 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7272 case 5:
7273 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7274 case 4:
7275 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7276 case 3:
7277 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7278 case 2:
7279 case 1:
7280
7281 default:
7282 break;
 7283	}
7284
Matt Carlson9ce768e2007-10-11 19:49:11 -07007285 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7286 /* Write our heartbeat update interval to APE. */
7287 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7288 APE_HOST_HEARTBEAT_INT_DISABLE);
Matt Carlson0d3031d2007-10-10 18:02:43 -07007289
Linus Torvalds1da177e2005-04-16 15:20:36 -07007290 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7291
Linus Torvalds1da177e2005-04-16 15:20:36 -07007292 return 0;
7293}
7294
7295/* Called at device open time to get the chip ready for
7296 * packet processing. Invoked with tp->lock held.
7297 */
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007298static int tg3_init_hw(struct tg3 *tp, int reset_phy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007299{
7300 int err;
7301
7302 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -08007303 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007304 if (err)
7305 goto out;
7306
7307 tg3_switch_clocks(tp);
7308
7309 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7310
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007311 err = tg3_reset_hw(tp, reset_phy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007312
7313out:
7314 return err;
7315}
7316
7317#define TG3_STAT_ADD32(PSTAT, REG) \
7318do { u32 __val = tr32(REG); \
7319 (PSTAT)->low += __val; \
7320 if ((PSTAT)->low < __val) \
7321 (PSTAT)->high += 1; \
7322} while (0)
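
/* Illustrative sketch, not driver code: the carry handling that
 * TG3_STAT_ADD32 performs, written out as a function.  The MAC
 * statistics registers are only 32 bits wide, so the driver keeps a
 * 64-bit software copy split into high/low u32 words (tg3_stat64_t)
 * and detects 32-bit wrap-around on every accumulation.
 */
static inline void tg3_stat64_add_sketch(tg3_stat64_t *pstat, u32 val)
{
	pstat->low += val;
	if (pstat->low < val)	/* unsigned add wrapped past 2^32 */
		pstat->high += 1;
}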
7323
7324static void tg3_periodic_fetch_stats(struct tg3 *tp)
7325{
7326 struct tg3_hw_stats *sp = tp->hw_stats;
7327
7328 if (!netif_carrier_ok(tp->dev))
7329 return;
7330
7331 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7332 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7333 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7334 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7335 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7336 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7337 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7338 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7339 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7340 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7341 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7342 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7343 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7344
7345 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7346 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7347 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7348 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7349 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7350 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7351 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7352 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7353 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7354 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7355 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7356 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7357 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7358 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
Michael Chan463d3052006-05-22 16:36:27 -07007359
7360 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7361 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7362 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007363}
7364
7365static void tg3_timer(unsigned long __opaque)
7366{
7367 struct tg3 *tp = (struct tg3 *) __opaque;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007368
Michael Chanf475f162006-03-27 23:20:14 -08007369 if (tp->irq_sync)
7370 goto restart_timer;
7371
David S. Millerf47c11e2005-06-24 20:18:35 -07007372 spin_lock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373
David S. Millerfac9b832005-05-18 22:46:34 -07007374 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7375 /* All of this garbage is because when using non-tagged
7376 * IRQ status the mailbox/status_block protocol the chip
7377 * uses with the cpu is race prone.
7378 */
7379 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7380 tw32(GRC_LOCAL_CTRL,
7381 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7382 } else {
7383 tw32(HOSTCC_MODE, tp->coalesce_mode |
7384 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7385 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007386
David S. Millerfac9b832005-05-18 22:46:34 -07007387 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7388 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
David S. Millerf47c11e2005-06-24 20:18:35 -07007389 spin_unlock(&tp->lock);
David S. Millerfac9b832005-05-18 22:46:34 -07007390 schedule_work(&tp->reset_task);
7391 return;
7392 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007393 }
7394
Linus Torvalds1da177e2005-04-16 15:20:36 -07007395 /* This part only runs once per second. */
7396 if (!--tp->timer_counter) {
David S. Millerfac9b832005-05-18 22:46:34 -07007397 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7398 tg3_periodic_fetch_stats(tp);
7399
Linus Torvalds1da177e2005-04-16 15:20:36 -07007400 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7401 u32 mac_stat;
7402 int phy_event;
7403
7404 mac_stat = tr32(MAC_STATUS);
7405
7406 phy_event = 0;
7407 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7408 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7409 phy_event = 1;
7410 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7411 phy_event = 1;
7412
7413 if (phy_event)
7414 tg3_setup_phy(tp, 0);
7415 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7416 u32 mac_stat = tr32(MAC_STATUS);
7417 int need_setup = 0;
7418
7419 if (netif_carrier_ok(tp->dev) &&
7420 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7421 need_setup = 1;
7422 }
 7423			if (!netif_carrier_ok(tp->dev) &&
7424 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7425 MAC_STATUS_SIGNAL_DET))) {
7426 need_setup = 1;
7427 }
7428 if (need_setup) {
Michael Chan3d3ebe72006-09-27 15:59:15 -07007429 if (!tp->serdes_counter) {
7430 tw32_f(MAC_MODE,
7431 (tp->mac_mode &
7432 ~MAC_MODE_PORT_MODE_MASK));
7433 udelay(40);
7434 tw32_f(MAC_MODE, tp->mac_mode);
7435 udelay(40);
7436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007437 tg3_setup_phy(tp, 0);
7438 }
Michael Chan747e8f82005-07-25 12:33:22 -07007439 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7440 tg3_serdes_parallel_detect(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007441
7442 tp->timer_counter = tp->timer_multiplier;
7443 }
7444
Michael Chan130b8e42006-09-27 16:00:40 -07007445 /* Heartbeat is only sent once every 2 seconds.
7446 *
7447 * The heartbeat is to tell the ASF firmware that the host
7448 * driver is still alive. In the event that the OS crashes,
7449 * ASF needs to reset the hardware to free up the FIFO space
7450 * that may be filled with rx packets destined for the host.
7451 * If the FIFO is full, ASF will no longer function properly.
7452 *
7453 * Unintended resets have been reported on real time kernels
7454 * where the timer doesn't run on time. Netpoll will also have
7455 * same problem.
7456 *
7457 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7458 * to check the ring condition when the heartbeat is expiring
7459 * before doing the reset. This will prevent most unintended
7460 * resets.
7461 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462 if (!--tp->asf_counter) {
7463 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7464 u32 val;
7465
Matt Carlson7c5026a2008-05-02 16:49:29 -07007466 tg3_wait_for_event_ack(tp);
7467
Michael Chanbbadf502006-04-06 21:46:34 -07007468 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
Michael Chan130b8e42006-09-27 16:00:40 -07007469 FWCMD_NICDRV_ALIVE3);
Michael Chanbbadf502006-04-06 21:46:34 -07007470 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
Michael Chan28fbef72005-10-26 15:48:35 -07007471 /* 5 seconds timeout */
Michael Chanbbadf502006-04-06 21:46:34 -07007472 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007473 val = tr32(GRC_RX_CPU_EVENT);
Matt Carlson7c5026a2008-05-02 16:49:29 -07007474 val |= GRC_RX_CPU_DRIVER_EVENT;
7475 tw32_f(GRC_RX_CPU_EVENT, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007476 }
7477 tp->asf_counter = tp->asf_multiplier;
7478 }
7479
David S. Millerf47c11e2005-06-24 20:18:35 -07007480 spin_unlock(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007481
Michael Chanf475f162006-03-27 23:20:14 -08007482restart_timer:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007483 tp->timer.expires = jiffies + tp->timer_offset;
7484 add_timer(&tp->timer);
7485}
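
/* Worked example, not driver code: how the counters programmed in
 * tg3_open() below produce the cadence described in the heartbeat
 * comment above.  tg3_open() sets timer_offset to HZ (tagged status)
 * or HZ / 10, timer_multiplier = HZ / timer_offset, and
 * asf_multiplier = 2 * (HZ / timer_offset).  Assuming HZ == 1000 and
 * non-tagged status: the timer fires every 100 jiffies (100 ms), the
 * once-per-second block runs every timer_multiplier = 10 ticks (1 s),
 * and the ASF heartbeat block runs every asf_multiplier = 20 ticks
 * (2 s).  With tagged status the tick is 1 s and the multipliers are
 * 1 and 2 respectively.
 */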
7486
Adrian Bunk81789ef2006-03-20 23:00:14 -08007487static int tg3_request_irq(struct tg3 *tp)
Michael Chanfcfa0a32006-03-20 22:28:41 -08007488{
David Howells7d12e782006-10-05 14:55:46 +01007489 irq_handler_t fn;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007490 unsigned long flags;
7491 struct net_device *dev = tp->dev;
7492
7493 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7494 fn = tg3_msi;
7495 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7496 fn = tg3_msi_1shot;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007497 flags = IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007498 } else {
7499 fn = tg3_interrupt;
7500 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7501 fn = tg3_interrupt_tagged;
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007502 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
Michael Chanfcfa0a32006-03-20 22:28:41 -08007503 }
 7504	return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
7505}
7506
Michael Chan79381092005-04-21 17:13:59 -07007507static int tg3_test_interrupt(struct tg3 *tp)
7508{
7509 struct net_device *dev = tp->dev;
Michael Chanb16250e2006-09-27 16:10:14 -07007510 int err, i, intr_ok = 0;
Michael Chan79381092005-04-21 17:13:59 -07007511
Michael Chand4bc3922005-05-29 14:59:20 -07007512 if (!netif_running(dev))
7513 return -ENODEV;
7514
Michael Chan79381092005-04-21 17:13:59 -07007515 tg3_disable_ints(tp);
7516
7517 free_irq(tp->pdev->irq, dev);
7518
7519 err = request_irq(tp->pdev->irq, tg3_test_isr,
Thomas Gleixner1fb9df52006-07-01 19:29:39 -07007520 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
Michael Chan79381092005-04-21 17:13:59 -07007521 if (err)
7522 return err;
7523
Michael Chan38f38432005-09-05 17:53:32 -07007524 tp->hw_status->status &= ~SD_STATUS_UPDATED;
Michael Chan79381092005-04-21 17:13:59 -07007525 tg3_enable_ints(tp);
7526
7527 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7528 HOSTCC_MODE_NOW);
7529
7530 for (i = 0; i < 5; i++) {
Michael Chanb16250e2006-09-27 16:10:14 -07007531 u32 int_mbox, misc_host_ctrl;
7532
Michael Chan09ee9292005-08-09 20:17:00 -07007533 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7534 TG3_64BIT_REG_LOW);
Michael Chanb16250e2006-09-27 16:10:14 -07007535 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7536
7537 if ((int_mbox != 0) ||
7538 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7539 intr_ok = 1;
Michael Chan79381092005-04-21 17:13:59 -07007540 break;
Michael Chanb16250e2006-09-27 16:10:14 -07007541 }
7542
Michael Chan79381092005-04-21 17:13:59 -07007543 msleep(10);
7544 }
7545
7546 tg3_disable_ints(tp);
7547
7548 free_irq(tp->pdev->irq, dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04007549
Michael Chanfcfa0a32006-03-20 22:28:41 -08007550 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007551
7552 if (err)
7553 return err;
7554
Michael Chanb16250e2006-09-27 16:10:14 -07007555 if (intr_ok)
Michael Chan79381092005-04-21 17:13:59 -07007556 return 0;
7557
7558 return -EIO;
7559}
7560
 7561/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
 7562 * INTx mode is successfully restored.
 7563 */
7564static int tg3_test_msi(struct tg3 *tp)
7565{
7566 struct net_device *dev = tp->dev;
7567 int err;
7568 u16 pci_cmd;
7569
7570 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7571 return 0;
7572
7573 /* Turn off SERR reporting in case MSI terminates with Master
7574 * Abort.
7575 */
7576 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7577 pci_write_config_word(tp->pdev, PCI_COMMAND,
7578 pci_cmd & ~PCI_COMMAND_SERR);
7579
7580 err = tg3_test_interrupt(tp);
7581
7582 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7583
7584 if (!err)
7585 return 0;
7586
7587 /* other failures */
7588 if (err != -EIO)
7589 return err;
7590
7591 /* MSI test failed, go back to INTx mode */
7592 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7593 "switching to INTx mode. Please report this failure to "
7594 "the PCI maintainer and include system chipset information.\n",
7595 tp->dev->name);
7596
7597 free_irq(tp->pdev->irq, dev);
7598 pci_disable_msi(tp->pdev);
7599
7600 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7601
Michael Chanfcfa0a32006-03-20 22:28:41 -08007602 err = tg3_request_irq(tp);
Michael Chan79381092005-04-21 17:13:59 -07007603 if (err)
7604 return err;
7605
7606 /* Need to reset the chip because the MSI cycle may have terminated
7607 * with Master Abort.
7608 */
David S. Millerf47c11e2005-06-24 20:18:35 -07007609 tg3_full_lock(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07007610
Michael Chan944d9802005-05-29 14:57:48 -07007611 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007612 err = tg3_init_hw(tp, 1);
Michael Chan79381092005-04-21 17:13:59 -07007613
David S. Millerf47c11e2005-06-24 20:18:35 -07007614 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07007615
7616 if (err)
7617 free_irq(tp->pdev->irq, dev);
7618
7619 return err;
7620}
7621
Linus Torvalds1da177e2005-04-16 15:20:36 -07007622static int tg3_open(struct net_device *dev)
7623{
7624 struct tg3 *tp = netdev_priv(dev);
7625 int err;
7626
Michael Chanc49a1562006-12-17 17:07:29 -08007627 netif_carrier_off(tp->dev);
7628
David S. Millerf47c11e2005-06-24 20:18:35 -07007629 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007630
Michael Chanbc1c7562006-03-20 17:48:03 -08007631 err = tg3_set_power_state(tp, PCI_D0);
Ira W. Snyder12862082006-11-21 17:44:31 -08007632 if (err) {
7633 tg3_full_unlock(tp);
Michael Chanbc1c7562006-03-20 17:48:03 -08007634 return err;
Ira W. Snyder12862082006-11-21 17:44:31 -08007635 }
Michael Chanbc1c7562006-03-20 17:48:03 -08007636
Linus Torvalds1da177e2005-04-16 15:20:36 -07007637 tg3_disable_ints(tp);
7638 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7639
David S. Millerf47c11e2005-06-24 20:18:35 -07007640 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007641
7642 /* The placement of this call is tied
7643 * to the setup and use of Host TX descriptors.
7644 */
7645 err = tg3_alloc_consistent(tp);
7646 if (err)
7647 return err;
7648
Michael Chan7544b092007-05-05 13:08:32 -07007649 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
David S. Millerfac9b832005-05-18 22:46:34 -07007650 /* All MSI supporting chips should support tagged
7651 * status. Assert that this is the case.
7652 */
7653 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7654 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7655 "Not using MSI.\n", tp->dev->name);
7656 } else if (pci_enable_msi(tp->pdev) == 0) {
Michael Chan88b06bc2005-04-21 17:13:25 -07007657 u32 msi_mode;
7658
7659 msi_mode = tr32(MSGINT_MODE);
7660 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7661 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7662 }
7663 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08007664 err = tg3_request_irq(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007665
7666 if (err) {
Michael Chan88b06bc2005-04-21 17:13:25 -07007667 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7668 pci_disable_msi(tp->pdev);
7669 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007671 tg3_free_consistent(tp);
7672 return err;
7673 }
7674
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007675 napi_enable(&tp->napi);
7676
David S. Millerf47c11e2005-06-24 20:18:35 -07007677 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007678
Gary Zambrano8e7a22e2006-04-29 18:59:13 -07007679 err = tg3_init_hw(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007680 if (err) {
Michael Chan944d9802005-05-29 14:57:48 -07007681 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007682 tg3_free_rings(tp);
7683 } else {
David S. Millerfac9b832005-05-18 22:46:34 -07007684 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7685 tp->timer_offset = HZ;
7686 else
7687 tp->timer_offset = HZ / 10;
7688
7689 BUG_ON(tp->timer_offset > HZ);
7690 tp->timer_counter = tp->timer_multiplier =
7691 (HZ / tp->timer_offset);
7692 tp->asf_counter = tp->asf_multiplier =
Michael Chan28fbef72005-10-26 15:48:35 -07007693 ((HZ / tp->timer_offset) * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007694
7695 init_timer(&tp->timer);
7696 tp->timer.expires = jiffies + tp->timer_offset;
7697 tp->timer.data = (unsigned long) tp;
7698 tp->timer.function = tg3_timer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007699 }
7700
David S. Millerf47c11e2005-06-24 20:18:35 -07007701 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007702
7703 if (err) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007704 napi_disable(&tp->napi);
Michael Chan88b06bc2005-04-21 17:13:25 -07007705 free_irq(tp->pdev->irq, dev);
7706 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7707 pci_disable_msi(tp->pdev);
7708 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7709 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007710 tg3_free_consistent(tp);
7711 return err;
7712 }
7713
Michael Chan79381092005-04-21 17:13:59 -07007714 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7715 err = tg3_test_msi(tp);
David S. Millerfac9b832005-05-18 22:46:34 -07007716
Michael Chan79381092005-04-21 17:13:59 -07007717 if (err) {
David S. Millerf47c11e2005-06-24 20:18:35 -07007718 tg3_full_lock(tp, 0);
Michael Chan79381092005-04-21 17:13:59 -07007719
7720 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7721 pci_disable_msi(tp->pdev);
7722 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7723 }
Michael Chan944d9802005-05-29 14:57:48 -07007724 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan79381092005-04-21 17:13:59 -07007725 tg3_free_rings(tp);
7726 tg3_free_consistent(tp);
7727
David S. Millerf47c11e2005-06-24 20:18:35 -07007728 tg3_full_unlock(tp);
Michael Chan79381092005-04-21 17:13:59 -07007729
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007730 napi_disable(&tp->napi);
7731
Michael Chan79381092005-04-21 17:13:59 -07007732 return err;
7733 }
Michael Chanfcfa0a32006-03-20 22:28:41 -08007734
7735 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7736 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
Michael Chanb5d37722006-09-27 16:06:21 -07007737 u32 val = tr32(PCIE_TRANSACTION_CFG);
Michael Chanfcfa0a32006-03-20 22:28:41 -08007738
Michael Chanb5d37722006-09-27 16:06:21 -07007739 tw32(PCIE_TRANSACTION_CFG,
7740 val | PCIE_TRANS_CFG_1SHOT_MSI);
Michael Chanfcfa0a32006-03-20 22:28:41 -08007741 }
7742 }
Michael Chan79381092005-04-21 17:13:59 -07007743 }
7744
David S. Millerf47c11e2005-06-24 20:18:35 -07007745 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007746
Michael Chan79381092005-04-21 17:13:59 -07007747 add_timer(&tp->timer);
7748 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007749 tg3_enable_ints(tp);
7750
David S. Millerf47c11e2005-06-24 20:18:35 -07007751 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007752
7753 netif_start_queue(dev);
7754
7755 return 0;
7756}
7757
7758#if 0
7759/*static*/ void tg3_dump_state(struct tg3 *tp)
7760{
7761 u32 val32, val32_2, val32_3, val32_4, val32_5;
7762 u16 val16;
7763 int i;
7764
7765 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7766 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7767 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7768 val16, val32);
7769
7770 /* MAC block */
7771 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7772 tr32(MAC_MODE), tr32(MAC_STATUS));
7773 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7774 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7775 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7776 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7777 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7778 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7779
7780 /* Send data initiator control block */
7781 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7782 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7783 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7784 tr32(SNDDATAI_STATSCTRL));
7785
7786 /* Send data completion control block */
7787 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7788
7789 /* Send BD ring selector block */
7790 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7791 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7792
7793 /* Send BD initiator control block */
7794 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7795 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7796
7797 /* Send BD completion control block */
7798 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7799
7800 /* Receive list placement control block */
7801 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7802 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7803 printk(" RCVLPC_STATSCTRL[%08x]\n",
7804 tr32(RCVLPC_STATSCTRL));
7805
7806 /* Receive data and receive BD initiator control block */
7807 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7808 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7809
7810 /* Receive data completion control block */
7811 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7812 tr32(RCVDCC_MODE));
7813
7814 /* Receive BD initiator control block */
7815 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7816 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7817
7818 /* Receive BD completion control block */
7819 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7820 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7821
7822 /* Receive list selector control block */
7823 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7824 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7825
7826 /* Mbuf cluster free block */
7827 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7828 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7829
7830 /* Host coalescing control block */
7831 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7832 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7833 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7834 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7835 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7836 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7837 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7838 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7839 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7840 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7841 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7842 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7843
7844 /* Memory arbiter control block */
7845 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7846 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7847
7848 /* Buffer manager control block */
7849 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7850 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7851 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7852 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7853 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7854 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7855 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7856 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7857
7858 /* Read DMA control block */
7859 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7860 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7861
7862 /* Write DMA control block */
7863 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7864 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7865
7866 /* DMA completion block */
7867 printk("DEBUG: DMAC_MODE[%08x]\n",
7868 tr32(DMAC_MODE));
7869
7870 /* GRC block */
7871 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7872 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7873 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7874 tr32(GRC_LOCAL_CTRL));
7875
7876 /* TG3_BDINFOs */
7877 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7878 tr32(RCVDBDI_JUMBO_BD + 0x0),
7879 tr32(RCVDBDI_JUMBO_BD + 0x4),
7880 tr32(RCVDBDI_JUMBO_BD + 0x8),
7881 tr32(RCVDBDI_JUMBO_BD + 0xc));
7882 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7883 tr32(RCVDBDI_STD_BD + 0x0),
7884 tr32(RCVDBDI_STD_BD + 0x4),
7885 tr32(RCVDBDI_STD_BD + 0x8),
7886 tr32(RCVDBDI_STD_BD + 0xc));
7887 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7888 tr32(RCVDBDI_MINI_BD + 0x0),
7889 tr32(RCVDBDI_MINI_BD + 0x4),
7890 tr32(RCVDBDI_MINI_BD + 0x8),
7891 tr32(RCVDBDI_MINI_BD + 0xc));
7892
7893 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7894 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7895 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7896 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7897 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7898 val32, val32_2, val32_3, val32_4);
7899
7900 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7901 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7902 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7903 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7904 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7905 val32, val32_2, val32_3, val32_4);
7906
7907 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7908 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7909 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7910 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7911 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7912 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7913 val32, val32_2, val32_3, val32_4, val32_5);
7914
7915 /* SW status block */
7916 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7917 tp->hw_status->status,
7918 tp->hw_status->status_tag,
7919 tp->hw_status->rx_jumbo_consumer,
7920 tp->hw_status->rx_consumer,
7921 tp->hw_status->rx_mini_consumer,
7922 tp->hw_status->idx[0].rx_producer,
7923 tp->hw_status->idx[0].tx_consumer);
7924
7925 /* SW statistics block */
7926 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7927 ((u32 *)tp->hw_stats)[0],
7928 ((u32 *)tp->hw_stats)[1],
7929 ((u32 *)tp->hw_stats)[2],
7930 ((u32 *)tp->hw_stats)[3]);
7931
7932 /* Mailboxes */
7933 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
Michael Chan09ee9292005-08-09 20:17:00 -07007934 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7935 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7936 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7937 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938
7939 /* NIC side send descriptors. */
7940 for (i = 0; i < 6; i++) {
7941 unsigned long txd;
7942
7943 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7944 + (i * sizeof(struct tg3_tx_buffer_desc));
7945 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7946 i,
7947 readl(txd + 0x0), readl(txd + 0x4),
7948 readl(txd + 0x8), readl(txd + 0xc));
7949 }
7950
7951 /* NIC side RX descriptors. */
7952 for (i = 0; i < 6; i++) {
7953 unsigned long rxd;
7954
7955 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7956 + (i * sizeof(struct tg3_rx_buffer_desc));
7957 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7958 i,
7959 readl(rxd + 0x0), readl(rxd + 0x4),
7960 readl(rxd + 0x8), readl(rxd + 0xc));
7961 rxd += (4 * sizeof(u32));
7962 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7963 i,
7964 readl(rxd + 0x0), readl(rxd + 0x4),
7965 readl(rxd + 0x8), readl(rxd + 0xc));
7966 }
7967
7968 for (i = 0; i < 6; i++) {
7969 unsigned long rxd;
7970
7971 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7972 + (i * sizeof(struct tg3_rx_buffer_desc));
7973 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7974 i,
7975 readl(rxd + 0x0), readl(rxd + 0x4),
7976 readl(rxd + 0x8), readl(rxd + 0xc));
7977 rxd += (4 * sizeof(u32));
7978 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7979 i,
7980 readl(rxd + 0x0), readl(rxd + 0x4),
7981 readl(rxd + 0x8), readl(rxd + 0xc));
7982 }
7983}
7984#endif
7985
7986static struct net_device_stats *tg3_get_stats(struct net_device *);
7987static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7988
7989static int tg3_close(struct net_device *dev)
7990{
7991 struct tg3 *tp = netdev_priv(dev);
7992
Stephen Hemmingerbea33482007-10-03 16:41:36 -07007993 napi_disable(&tp->napi);
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07007994 cancel_work_sync(&tp->reset_task);
Michael Chan7faa0062006-02-02 17:29:28 -08007995
Linus Torvalds1da177e2005-04-16 15:20:36 -07007996 netif_stop_queue(dev);
7997
7998 del_timer_sync(&tp->timer);
7999
David S. Millerf47c11e2005-06-24 20:18:35 -07008000 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008001#if 0
8002 tg3_dump_state(tp);
8003#endif
8004
8005 tg3_disable_ints(tp);
8006
Michael Chan944d9802005-05-29 14:57:48 -07008007 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008008 tg3_free_rings(tp);
Michael Chan5cf64b82007-05-05 12:11:21 -07008009 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008010
David S. Millerf47c11e2005-06-24 20:18:35 -07008011 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008012
Michael Chan88b06bc2005-04-21 17:13:25 -07008013 free_irq(tp->pdev->irq, dev);
8014 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8015 pci_disable_msi(tp->pdev);
8016 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8017 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008018
8019 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8020 sizeof(tp->net_stats_prev));
8021 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8022 sizeof(tp->estats_prev));
8023
8024 tg3_free_consistent(tp);
8025
Michael Chanbc1c7562006-03-20 17:48:03 -08008026 tg3_set_power_state(tp, PCI_D3hot);
8027
8028 netif_carrier_off(tp->dev);
8029
Linus Torvalds1da177e2005-04-16 15:20:36 -07008030 return 0;
8031}
8032
8033static inline unsigned long get_stat64(tg3_stat64_t *val)
8034{
8035 unsigned long ret;
8036
8037#if (BITS_PER_LONG == 32)
8038 ret = val->low;
8039#else
8040 ret = ((u64)val->high << 32) | ((u64)val->low);
8041#endif
8042 return ret;
8043}
8044
8045static unsigned long calc_crc_errors(struct tg3 *tp)
8046{
8047 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8048
8049 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8050 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008052 u32 val;
8053
David S. Millerf47c11e2005-06-24 20:18:35 -07008054 spin_lock_bh(&tp->lock);
Michael Chan569a5df2007-02-13 12:18:15 -08008055 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8056 tg3_writephy(tp, MII_TG3_TEST1,
8057 val | MII_TG3_TEST1_CRC_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008058 tg3_readphy(tp, 0x14, &val);
8059 } else
8060 val = 0;
David S. Millerf47c11e2005-06-24 20:18:35 -07008061 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008062
8063 tp->phy_crc_errors += val;
8064
8065 return tp->phy_crc_errors;
8066 }
8067
8068 return get_stat64(&hw_stats->rx_fcs_errors);
8069}
8070
8071#define ESTAT_ADD(member) \
8072 estats->member = old_estats->member + \
8073 get_stat64(&hw_stats->member)
8074
8075static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8076{
8077 struct tg3_ethtool_stats *estats = &tp->estats;
8078 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8079 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8080
8081 if (!hw_stats)
8082 return old_estats;
8083
8084 ESTAT_ADD(rx_octets);
8085 ESTAT_ADD(rx_fragments);
8086 ESTAT_ADD(rx_ucast_packets);
8087 ESTAT_ADD(rx_mcast_packets);
8088 ESTAT_ADD(rx_bcast_packets);
8089 ESTAT_ADD(rx_fcs_errors);
8090 ESTAT_ADD(rx_align_errors);
8091 ESTAT_ADD(rx_xon_pause_rcvd);
8092 ESTAT_ADD(rx_xoff_pause_rcvd);
8093 ESTAT_ADD(rx_mac_ctrl_rcvd);
8094 ESTAT_ADD(rx_xoff_entered);
8095 ESTAT_ADD(rx_frame_too_long_errors);
8096 ESTAT_ADD(rx_jabbers);
8097 ESTAT_ADD(rx_undersize_packets);
8098 ESTAT_ADD(rx_in_length_errors);
8099 ESTAT_ADD(rx_out_length_errors);
8100 ESTAT_ADD(rx_64_or_less_octet_packets);
8101 ESTAT_ADD(rx_65_to_127_octet_packets);
8102 ESTAT_ADD(rx_128_to_255_octet_packets);
8103 ESTAT_ADD(rx_256_to_511_octet_packets);
8104 ESTAT_ADD(rx_512_to_1023_octet_packets);
8105 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8106 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8107 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8108 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8109 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8110
8111 ESTAT_ADD(tx_octets);
8112 ESTAT_ADD(tx_collisions);
8113 ESTAT_ADD(tx_xon_sent);
8114 ESTAT_ADD(tx_xoff_sent);
8115 ESTAT_ADD(tx_flow_control);
8116 ESTAT_ADD(tx_mac_errors);
8117 ESTAT_ADD(tx_single_collisions);
8118 ESTAT_ADD(tx_mult_collisions);
8119 ESTAT_ADD(tx_deferred);
8120 ESTAT_ADD(tx_excessive_collisions);
8121 ESTAT_ADD(tx_late_collisions);
8122 ESTAT_ADD(tx_collide_2times);
8123 ESTAT_ADD(tx_collide_3times);
8124 ESTAT_ADD(tx_collide_4times);
8125 ESTAT_ADD(tx_collide_5times);
8126 ESTAT_ADD(tx_collide_6times);
8127 ESTAT_ADD(tx_collide_7times);
8128 ESTAT_ADD(tx_collide_8times);
8129 ESTAT_ADD(tx_collide_9times);
8130 ESTAT_ADD(tx_collide_10times);
8131 ESTAT_ADD(tx_collide_11times);
8132 ESTAT_ADD(tx_collide_12times);
8133 ESTAT_ADD(tx_collide_13times);
8134 ESTAT_ADD(tx_collide_14times);
8135 ESTAT_ADD(tx_collide_15times);
8136 ESTAT_ADD(tx_ucast_packets);
8137 ESTAT_ADD(tx_mcast_packets);
8138 ESTAT_ADD(tx_bcast_packets);
8139 ESTAT_ADD(tx_carrier_sense_errors);
8140 ESTAT_ADD(tx_discards);
8141 ESTAT_ADD(tx_errors);
8142
8143 ESTAT_ADD(dma_writeq_full);
8144 ESTAT_ADD(dma_write_prioq_full);
8145 ESTAT_ADD(rxbds_empty);
8146 ESTAT_ADD(rx_discards);
8147 ESTAT_ADD(rx_errors);
8148 ESTAT_ADD(rx_threshold_hit);
8149
8150 ESTAT_ADD(dma_readq_full);
8151 ESTAT_ADD(dma_read_prioq_full);
8152 ESTAT_ADD(tx_comp_queue_full);
8153
8154 ESTAT_ADD(ring_set_send_prod_index);
8155 ESTAT_ADD(ring_status_update);
8156 ESTAT_ADD(nic_irqs);
8157 ESTAT_ADD(nic_avoided_irqs);
8158 ESTAT_ADD(nic_tx_threshold_hit);
8159
8160 return estats;
8161}
8162
8163static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8164{
8165 struct tg3 *tp = netdev_priv(dev);
8166 struct net_device_stats *stats = &tp->net_stats;
8167 struct net_device_stats *old_stats = &tp->net_stats_prev;
8168 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8169
8170 if (!hw_stats)
8171 return old_stats;
8172
8173 stats->rx_packets = old_stats->rx_packets +
8174 get_stat64(&hw_stats->rx_ucast_packets) +
8175 get_stat64(&hw_stats->rx_mcast_packets) +
8176 get_stat64(&hw_stats->rx_bcast_packets);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008177
Linus Torvalds1da177e2005-04-16 15:20:36 -07008178 stats->tx_packets = old_stats->tx_packets +
8179 get_stat64(&hw_stats->tx_ucast_packets) +
8180 get_stat64(&hw_stats->tx_mcast_packets) +
8181 get_stat64(&hw_stats->tx_bcast_packets);
8182
8183 stats->rx_bytes = old_stats->rx_bytes +
8184 get_stat64(&hw_stats->rx_octets);
8185 stats->tx_bytes = old_stats->tx_bytes +
8186 get_stat64(&hw_stats->tx_octets);
8187
8188 stats->rx_errors = old_stats->rx_errors +
John W. Linville4f63b872005-09-12 14:43:18 -07008189 get_stat64(&hw_stats->rx_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008190 stats->tx_errors = old_stats->tx_errors +
8191 get_stat64(&hw_stats->tx_errors) +
8192 get_stat64(&hw_stats->tx_mac_errors) +
8193 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8194 get_stat64(&hw_stats->tx_discards);
8195
8196 stats->multicast = old_stats->multicast +
8197 get_stat64(&hw_stats->rx_mcast_packets);
8198 stats->collisions = old_stats->collisions +
8199 get_stat64(&hw_stats->tx_collisions);
8200
8201 stats->rx_length_errors = old_stats->rx_length_errors +
8202 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8203 get_stat64(&hw_stats->rx_undersize_packets);
8204
8205 stats->rx_over_errors = old_stats->rx_over_errors +
8206 get_stat64(&hw_stats->rxbds_empty);
8207 stats->rx_frame_errors = old_stats->rx_frame_errors +
8208 get_stat64(&hw_stats->rx_align_errors);
8209 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8210 get_stat64(&hw_stats->tx_discards);
8211 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8212 get_stat64(&hw_stats->tx_carrier_sense_errors);
8213
8214 stats->rx_crc_errors = old_stats->rx_crc_errors +
8215 calc_crc_errors(tp);
8216
John W. Linville4f63b872005-09-12 14:43:18 -07008217 stats->rx_missed_errors = old_stats->rx_missed_errors +
8218 get_stat64(&hw_stats->rx_discards);
8219
Linus Torvalds1da177e2005-04-16 15:20:36 -07008220 return stats;
8221}
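
/* Worked example, not driver code: tg3_close() saves the accumulated
 * totals into net_stats_prev/estats_prev, and the hardware counters
 * effectively restart from zero on the next open.  If rx_packets
 * stood at 1000 when the device was last closed and the hardware
 * statistics block now shows 250 received packets, tg3_get_stats()
 * reports 1000 + 250 = 1250.  That is why every field above is the
 * old_stats value plus the current hardware value.
 */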
8222
8223static inline u32 calc_crc(unsigned char *buf, int len)
8224{
8225 u32 reg;
8226 u32 tmp;
8227 int j, k;
8228
8229 reg = 0xffffffff;
8230
8231 for (j = 0; j < len; j++) {
8232 reg ^= buf[j];
8233
8234 for (k = 0; k < 8; k++) {
8235 tmp = reg & 0x01;
8236
8237 reg >>= 1;
8238
8239 if (tmp) {
8240 reg ^= 0xedb88320;
8241 }
8242 }
8243 }
8244
8245 return ~reg;
8246}
8247
8248static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8249{
8250 /* accept or reject all multicast frames */
8251 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8252 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8253 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8254 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8255}
8256
8257static void __tg3_set_rx_mode(struct net_device *dev)
8258{
8259 struct tg3 *tp = netdev_priv(dev);
8260 u32 rx_mode;
8261
8262 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8263 RX_MODE_KEEP_VLAN_TAG);
8264
8265 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8266 * flag clear.
8267 */
8268#if TG3_VLAN_TAG_USED
8269 if (!tp->vlgrp &&
8270 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8271 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8272#else
 8273	/* By definition, VLAN is always disabled in this
8274 * case.
8275 */
8276 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8277 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8278#endif
8279
8280 if (dev->flags & IFF_PROMISC) {
8281 /* Promiscuous mode. */
8282 rx_mode |= RX_MODE_PROMISC;
8283 } else if (dev->flags & IFF_ALLMULTI) {
8284 /* Accept all multicast. */
8285 tg3_set_multi (tp, 1);
8286 } else if (dev->mc_count < 1) {
8287 /* Reject all multicast. */
8288 tg3_set_multi (tp, 0);
8289 } else {
8290 /* Accept one or more multicast(s). */
8291 struct dev_mc_list *mclist;
8292 unsigned int i;
8293 u32 mc_filter[4] = { 0, };
8294 u32 regidx;
8295 u32 bit;
8296 u32 crc;
8297
8298 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8299 i++, mclist = mclist->next) {
8300
8301 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8302 bit = ~crc & 0x7f;
8303 regidx = (bit & 0x60) >> 5;
8304 bit &= 0x1f;
8305 mc_filter[regidx] |= (1 << bit);
8306 }
8307
8308 tw32(MAC_HASH_REG_0, mc_filter[0]);
8309 tw32(MAC_HASH_REG_1, mc_filter[1]);
8310 tw32(MAC_HASH_REG_2, mc_filter[2]);
8311 tw32(MAC_HASH_REG_3, mc_filter[3]);
8312 }
8313
8314 if (rx_mode != tp->rx_mode) {
8315 tp->rx_mode = rx_mode;
8316 tw32_f(MAC_RX_MODE, rx_mode);
8317 udelay(10);
8318 }
8319}
8320
8321static void tg3_set_rx_mode(struct net_device *dev)
8322{
8323 struct tg3 *tp = netdev_priv(dev);
8324
Michael Chane75f7c92006-03-20 21:33:26 -08008325 if (!netif_running(dev))
8326 return;
8327
David S. Millerf47c11e2005-06-24 20:18:35 -07008328 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008329 __tg3_set_rx_mode(dev);
David S. Millerf47c11e2005-06-24 20:18:35 -07008330 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008331}
8332
8333#define TG3_REGDUMP_LEN (32 * 1024)
8334
8335static int tg3_get_regs_len(struct net_device *dev)
8336{
8337 return TG3_REGDUMP_LEN;
8338}
8339
8340static void tg3_get_regs(struct net_device *dev,
8341 struct ethtool_regs *regs, void *_p)
8342{
8343 u32 *p = _p;
8344 struct tg3 *tp = netdev_priv(dev);
8345 u8 *orig_p = _p;
8346 int i;
8347
8348 regs->version = 0;
8349
8350 memset(p, 0, TG3_REGDUMP_LEN);
8351
Michael Chanbc1c7562006-03-20 17:48:03 -08008352 if (tp->link_config.phy_is_low_power)
8353 return;
8354
David S. Millerf47c11e2005-06-24 20:18:35 -07008355 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008356
8357#define __GET_REG32(reg) (*(p)++ = tr32(reg))
8358#define GET_REG32_LOOP(base,len) \
8359do { p = (u32 *)(orig_p + (base)); \
8360 for (i = 0; i < len; i += 4) \
8361 __GET_REG32((base) + i); \
8362} while (0)
8363#define GET_REG32_1(reg) \
8364do { p = (u32 *)(orig_p + (reg)); \
8365 __GET_REG32((reg)); \
8366} while (0)
8367
8368 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8369 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8370 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8371 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8372 GET_REG32_1(SNDDATAC_MODE);
8373 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8374 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8375 GET_REG32_1(SNDBDC_MODE);
8376 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8377 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8378 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8379 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8380 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8381 GET_REG32_1(RCVDCC_MODE);
8382 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8383 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8384 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8385 GET_REG32_1(MBFREE_MODE);
8386 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8387 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8388 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8389 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8390 GET_REG32_LOOP(WDMAC_MODE, 0x08);
Chris Elmquist091465d2005-12-20 13:25:19 -08008391 GET_REG32_1(RX_CPU_MODE);
8392 GET_REG32_1(RX_CPU_STATE);
8393 GET_REG32_1(RX_CPU_PGMCTR);
8394 GET_REG32_1(RX_CPU_HWBKPT);
8395 GET_REG32_1(TX_CPU_MODE);
8396 GET_REG32_1(TX_CPU_STATE);
8397 GET_REG32_1(TX_CPU_PGMCTR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008398 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8399 GET_REG32_LOOP(FTQ_RESET, 0x120);
8400 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8401 GET_REG32_1(DMAC_MODE);
8402 GET_REG32_LOOP(GRC_MODE, 0x4c);
8403 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8404 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8405
8406#undef __GET_REG32
8407#undef GET_REG32_LOOP
8408#undef GET_REG32_1
8409
David S. Millerf47c11e2005-06-24 20:18:35 -07008410 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008411}
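
/* Sketch, not driver code: GET_REG32_LOOP()/GET_REG32_1() above place
 * each register at its own offset inside the dump buffer (p is reset
 * to orig_p + base for every block), so a value can be pulled back out
 * of the ethtool register dump with plain offset arithmetic.  "reg" is
 * an ordinary TG3 register offset such as MAC_MODE from tg3.h; regions
 * that are not dumped read back as zero from the memset() above.
 */
static inline u32 tg3_regdump_peek(const void *dump, unsigned int reg)
{
	return *(const u32 *)((const u8 *)dump + reg);
}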
8412
8413static int tg3_get_eeprom_len(struct net_device *dev)
8414{
8415 struct tg3 *tp = netdev_priv(dev);
8416
8417 return tp->nvram_size;
8418}
8419
8420static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
Al Virob9fc7dc2007-12-17 22:59:57 -08008421static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
Michael Chan18201802006-03-20 22:29:15 -08008422static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008423
8424static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8425{
8426 struct tg3 *tp = netdev_priv(dev);
8427 int ret;
8428 u8 *pd;
Al Virob9fc7dc2007-12-17 22:59:57 -08008429 u32 i, offset, len, b_offset, b_count;
8430 __le32 val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008431
Michael Chanbc1c7562006-03-20 17:48:03 -08008432 if (tp->link_config.phy_is_low_power)
8433 return -EAGAIN;
8434
Linus Torvalds1da177e2005-04-16 15:20:36 -07008435 offset = eeprom->offset;
8436 len = eeprom->len;
8437 eeprom->len = 0;
8438
8439 eeprom->magic = TG3_EEPROM_MAGIC;
8440
8441 if (offset & 3) {
8442 /* adjustments to start on required 4 byte boundary */
8443 b_offset = offset & 3;
8444 b_count = 4 - b_offset;
8445 if (b_count > len) {
8446 /* i.e. offset=1 len=2 */
8447 b_count = len;
8448 }
Al Virob9fc7dc2007-12-17 22:59:57 -08008449 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008450 if (ret)
8451 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008452 memcpy(data, ((char*)&val) + b_offset, b_count);
8453 len -= b_count;
8454 offset += b_count;
8455 eeprom->len += b_count;
8456 }
8457
 8458	/* read bytes up to the last 4 byte boundary */
8459 pd = &data[eeprom->len];
8460 for (i = 0; i < (len - (len & 3)); i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -08008461 ret = tg3_nvram_read_le(tp, offset + i, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008462 if (ret) {
8463 eeprom->len += i;
8464 return ret;
8465 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008466 memcpy(pd + i, &val, 4);
8467 }
8468 eeprom->len += i;
8469
8470 if (len & 3) {
8471 /* read last bytes not ending on 4 byte boundary */
8472 pd = &data[eeprom->len];
8473 b_count = len & 3;
8474 b_offset = offset + len - b_count;
Al Virob9fc7dc2007-12-17 22:59:57 -08008475 ret = tg3_nvram_read_le(tp, b_offset, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008476 if (ret)
8477 return ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008478 memcpy(pd, &val, b_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008479 eeprom->len += b_count;
8480 }
8481 return 0;
8482}
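
/* Worked example, not driver code: the NVRAM is read in 4-byte words,
 * so tg3_get_eeprom() above splits an unaligned request into a partial
 * leading word, whole middle words, and a partial trailing word.  For
 * offset = 5, len = 10 (bytes 5..14): the word at offset 4 is read and
 * its last 3 bytes kept, the word at offset 8 is copied whole, and the
 * word at offset 12 is read with only its first 3 bytes kept, giving
 * 3 + 4 + 3 = 10 bytes in total.
 */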
8483
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008484static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008485
8486static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8487{
8488 struct tg3 *tp = netdev_priv(dev);
8489 int ret;
Al Virob9fc7dc2007-12-17 22:59:57 -08008490 u32 offset, len, b_offset, odd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008491 u8 *buf;
Al Virob9fc7dc2007-12-17 22:59:57 -08008492 __le32 start, end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008493
Michael Chanbc1c7562006-03-20 17:48:03 -08008494 if (tp->link_config.phy_is_low_power)
8495 return -EAGAIN;
8496
Linus Torvalds1da177e2005-04-16 15:20:36 -07008497 if (eeprom->magic != TG3_EEPROM_MAGIC)
8498 return -EINVAL;
8499
8500 offset = eeprom->offset;
8501 len = eeprom->len;
8502
8503 if ((b_offset = (offset & 3))) {
8504 /* adjustments to start on required 4 byte boundary */
Al Virob9fc7dc2007-12-17 22:59:57 -08008505 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008506 if (ret)
8507 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008508 len += b_offset;
8509 offset &= ~3;
Michael Chan1c8594b2005-04-21 17:12:46 -07008510 if (len < 4)
8511 len = 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008512 }
8513
8514 odd_len = 0;
Michael Chan1c8594b2005-04-21 17:12:46 -07008515 if (len & 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008516 /* adjustments to end on required 4 byte boundary */
8517 odd_len = 1;
8518 len = (len + 3) & ~3;
Al Virob9fc7dc2007-12-17 22:59:57 -08008519 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008520 if (ret)
8521 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008522 }
8523
8524 buf = data;
8525 if (b_offset || odd_len) {
8526 buf = kmalloc(len, GFP_KERNEL);
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008527 if (!buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008528 return -ENOMEM;
8529 if (b_offset)
8530 memcpy(buf, &start, 4);
8531 if (odd_len)
8532 memcpy(buf+len-4, &end, 4);
8533 memcpy(buf + b_offset, data, eeprom->len);
8534 }
8535
8536 ret = tg3_nvram_write_block(tp, offset, len, buf);
8537
8538 if (buf != data)
8539 kfree(buf);
8540
8541 return ret;
8542}
8543
8544static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8545{
8546 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008547
Linus Torvalds1da177e2005-04-16 15:20:36 -07008548 cmd->supported = (SUPPORTED_Autoneg);
8549
8550 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8551 cmd->supported |= (SUPPORTED_1000baseT_Half |
8552 SUPPORTED_1000baseT_Full);
8553
Karsten Keilef348142006-05-12 12:49:08 -07008554 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008555 cmd->supported |= (SUPPORTED_100baseT_Half |
8556 SUPPORTED_100baseT_Full |
8557 SUPPORTED_10baseT_Half |
8558 SUPPORTED_10baseT_Full |
Matt Carlson3bebab52007-11-12 21:22:40 -08008559 SUPPORTED_TP);
Karsten Keilef348142006-05-12 12:49:08 -07008560 cmd->port = PORT_TP;
8561 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008562 cmd->supported |= SUPPORTED_FIBRE;
Karsten Keilef348142006-05-12 12:49:08 -07008563 cmd->port = PORT_FIBRE;
8564 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008565
Linus Torvalds1da177e2005-04-16 15:20:36 -07008566 cmd->advertising = tp->link_config.advertising;
8567 if (netif_running(dev)) {
8568 cmd->speed = tp->link_config.active_speed;
8569 cmd->duplex = tp->link_config.active_duplex;
8570 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008571 cmd->phy_address = PHY_ADDR;
8572 cmd->transceiver = 0;
8573 cmd->autoneg = tp->link_config.autoneg;
8574 cmd->maxtxpkt = 0;
8575 cmd->maxrxpkt = 0;
8576 return 0;
8577}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008578
Linus Torvalds1da177e2005-04-16 15:20:36 -07008579static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8580{
8581 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008582
8583 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008584 /* These are the only valid advertisement bits allowed. */
8585 if (cmd->autoneg == AUTONEG_ENABLE &&
8586 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8587 ADVERTISED_1000baseT_Full |
8588 ADVERTISED_Autoneg |
8589 ADVERTISED_FIBRE)))
8590 return -EINVAL;
Michael Chan37ff2382005-10-26 15:49:51 -07008591 /* Fiber can only do SPEED_1000. */
8592 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8593 (cmd->speed != SPEED_1000))
8594 return -EINVAL;
8595 /* Copper cannot force SPEED_1000. */
8596 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8597 (cmd->speed == SPEED_1000))
8598 return -EINVAL;
8599 else if ((cmd->speed == SPEED_1000) &&
8600 		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8601 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008602
David S. Millerf47c11e2005-06-24 20:18:35 -07008603 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008604
8605 tp->link_config.autoneg = cmd->autoneg;
8606 if (cmd->autoneg == AUTONEG_ENABLE) {
Andy Gospodarek405d8e52007-10-08 01:08:47 -07008607 tp->link_config.advertising = (cmd->advertising |
8608 ADVERTISED_Autoneg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008609 tp->link_config.speed = SPEED_INVALID;
8610 tp->link_config.duplex = DUPLEX_INVALID;
8611 } else {
8612 tp->link_config.advertising = 0;
8613 tp->link_config.speed = cmd->speed;
8614 tp->link_config.duplex = cmd->duplex;
8615 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008616
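	/* Remember the requested settings; other paths (e.g. the power
	 * management code) appear to restore the link from these orig_*
	 * copies.
	 */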
Michael Chan24fcad62006-12-17 17:06:46 -08008617 tp->link_config.orig_speed = tp->link_config.speed;
8618 tp->link_config.orig_duplex = tp->link_config.duplex;
8619 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8620
Linus Torvalds1da177e2005-04-16 15:20:36 -07008621 if (netif_running(dev))
8622 tg3_setup_phy(tp, 1);
8623
David S. Millerf47c11e2005-06-24 20:18:35 -07008624 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008625
Linus Torvalds1da177e2005-04-16 15:20:36 -07008626 return 0;
8627}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008628
Linus Torvalds1da177e2005-04-16 15:20:36 -07008629static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8630{
8631 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008632
Linus Torvalds1da177e2005-04-16 15:20:36 -07008633 strcpy(info->driver, DRV_MODULE_NAME);
8634 strcpy(info->version, DRV_MODULE_VERSION);
Michael Chanc4e65752006-03-20 22:29:32 -08008635 strcpy(info->fw_version, tp->fw_ver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008636 strcpy(info->bus_info, pci_name(tp->pdev));
8637}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008638
Linus Torvalds1da177e2005-04-16 15:20:36 -07008639static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8640{
8641 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008642
Gary Zambranoa85feb82007-05-05 11:52:19 -07008643 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8644 wol->supported = WAKE_MAGIC;
8645 else
8646 wol->supported = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008647 wol->wolopts = 0;
8648 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8649 wol->wolopts = WAKE_MAGIC;
8650 memset(&wol->sopass, 0, sizeof(wol->sopass));
8651}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008652
Linus Torvalds1da177e2005-04-16 15:20:36 -07008653static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8654{
8655 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008656
Linus Torvalds1da177e2005-04-16 15:20:36 -07008657 if (wol->wolopts & ~WAKE_MAGIC)
8658 return -EINVAL;
8659 if ((wol->wolopts & WAKE_MAGIC) &&
Gary Zambranoa85feb82007-05-05 11:52:19 -07008660 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008661 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008662
David S. Millerf47c11e2005-06-24 20:18:35 -07008663 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008664 if (wol->wolopts & WAKE_MAGIC)
8665 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8666 else
8667 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
David S. Millerf47c11e2005-06-24 20:18:35 -07008668 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008669
Linus Torvalds1da177e2005-04-16 15:20:36 -07008670 return 0;
8671}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008672
Linus Torvalds1da177e2005-04-16 15:20:36 -07008673static u32 tg3_get_msglevel(struct net_device *dev)
8674{
8675 struct tg3 *tp = netdev_priv(dev);
8676 return tp->msg_enable;
8677}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008678
Linus Torvalds1da177e2005-04-16 15:20:36 -07008679static void tg3_set_msglevel(struct net_device *dev, u32 value)
8680{
8681 struct tg3 *tp = netdev_priv(dev);
8682 tp->msg_enable = value;
8683}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008684
Linus Torvalds1da177e2005-04-16 15:20:36 -07008685static int tg3_set_tso(struct net_device *dev, u32 value)
8686{
8687 struct tg3 *tp = netdev_priv(dev);
8688
8689 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8690 if (value)
8691 return -EINVAL;
8692 return 0;
8693 }
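	/* Chips with the 2nd-generation hardware TSO engine (other than the
	 * 5906) can also offload IPv6 TSO, and the 5761 additionally supports
	 * TSO with ECN.  Keep those feature flags in sync with the TSO bit.
	 */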
Michael Chanb5d37722006-09-27 16:06:21 -07008694 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8695 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07008696 if (value) {
Michael Chanb0026622006-07-03 19:42:14 -07008697 dev->features |= NETIF_F_TSO6;
Matt Carlson9936bcf2007-10-10 18:03:07 -07008698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8699 dev->features |= NETIF_F_TSO_ECN;
8700 } else
8701 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
Michael Chanb0026622006-07-03 19:42:14 -07008702 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008703 return ethtool_op_set_tso(dev, value);
8704}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008705
Linus Torvalds1da177e2005-04-16 15:20:36 -07008706static int tg3_nway_reset(struct net_device *dev)
8707{
8708 struct tg3 *tp = netdev_priv(dev);
8709 u32 bmcr;
8710 int r;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008711
Linus Torvalds1da177e2005-04-16 15:20:36 -07008712 if (!netif_running(dev))
8713 return -EAGAIN;
8714
Michael Chanc94e3942005-09-27 12:12:42 -07008715 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8716 return -EINVAL;
8717
David S. Millerf47c11e2005-06-24 20:18:35 -07008718 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008719 r = -EINVAL;
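	/* BMCR is read twice; the first result is discarded and only the
	 * checked second read is used (presumably to flush latched bits).
	 */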
8720 tg3_readphy(tp, MII_BMCR, &bmcr);
8721 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
Michael Chanc94e3942005-09-27 12:12:42 -07008722 ((bmcr & BMCR_ANENABLE) ||
8723 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8724 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8725 BMCR_ANENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008726 r = 0;
8727 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008728 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008729
Linus Torvalds1da177e2005-04-16 15:20:36 -07008730 return r;
8731}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008732
Linus Torvalds1da177e2005-04-16 15:20:36 -07008733static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8734{
8735 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008736
Linus Torvalds1da177e2005-04-16 15:20:36 -07008737 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8738 ering->rx_mini_max_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08008739 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8740 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8741 else
8742 ering->rx_jumbo_max_pending = 0;
8743
8744 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008745
8746 ering->rx_pending = tp->rx_pending;
8747 ering->rx_mini_pending = 0;
Michael Chan4f81c322006-03-20 21:33:42 -08008748 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8749 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8750 else
8751 ering->rx_jumbo_pending = 0;
8752
Linus Torvalds1da177e2005-04-16 15:20:36 -07008753 ering->tx_pending = tp->tx_pending;
8754}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008755
Linus Torvalds1da177e2005-04-16 15:20:36 -07008756static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8757{
8758 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008759 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008760
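	/* Reject ring sizes beyond the hardware limits.  The TX ring must
	 * also be larger than MAX_SKB_FRAGS (3x that on chips with the TSO
	 * bug workaround, which presumably needs extra descriptors per skb).
	 */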
Linus Torvalds1da177e2005-04-16 15:20:36 -07008761 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8762 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
Michael Chanbc3a9252006-10-18 20:55:18 -07008763 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8764 (ering->tx_pending <= MAX_SKB_FRAGS) ||
Michael Chan7f62ad52007-02-20 23:25:40 -08008765 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
Michael Chanbc3a9252006-10-18 20:55:18 -07008766 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008767 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008768
Michael Chanbbe832c2005-06-24 20:20:04 -07008769 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008770 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07008771 irq_sync = 1;
8772 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008773
Michael Chanbbe832c2005-06-24 20:20:04 -07008774 tg3_full_lock(tp, irq_sync);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008775
Linus Torvalds1da177e2005-04-16 15:20:36 -07008776 tp->rx_pending = ering->rx_pending;
8777
8778 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8779 tp->rx_pending > 63)
8780 tp->rx_pending = 63;
8781 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8782 tp->tx_pending = ering->tx_pending;
8783
8784 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07008785 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008786 err = tg3_restart_hw(tp, 1);
8787 if (!err)
8788 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008789 }
8790
David S. Millerf47c11e2005-06-24 20:18:35 -07008791 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008792
Michael Chanb9ec6c12006-07-25 16:37:27 -07008793 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008794}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008795
Linus Torvalds1da177e2005-04-16 15:20:36 -07008796static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8797{
8798 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008799
Linus Torvalds1da177e2005-04-16 15:20:36 -07008800 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
Matt Carlson8d018622007-12-20 20:05:44 -08008801
8802 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8803 epause->rx_pause = 1;
8804 else
8805 epause->rx_pause = 0;
8806
8807 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8808 epause->tx_pause = 1;
8809 else
8810 epause->tx_pause = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008811}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008812
Linus Torvalds1da177e2005-04-16 15:20:36 -07008813static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8814{
8815 struct tg3 *tp = netdev_priv(dev);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008816 int irq_sync = 0, err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008817
Michael Chanbbe832c2005-06-24 20:20:04 -07008818 if (netif_running(dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008819 tg3_netif_stop(tp);
Michael Chanbbe832c2005-06-24 20:20:04 -07008820 irq_sync = 1;
8821 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008822
Michael Chanbbe832c2005-06-24 20:20:04 -07008823 tg3_full_lock(tp, irq_sync);
David S. Millerf47c11e2005-06-24 20:18:35 -07008824
Linus Torvalds1da177e2005-04-16 15:20:36 -07008825 if (epause->autoneg)
8826 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8827 else
8828 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8829 if (epause->rx_pause)
Matt Carlson8d018622007-12-20 20:05:44 -08008830 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008831 else
Matt Carlson8d018622007-12-20 20:05:44 -08008832 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008833 if (epause->tx_pause)
Matt Carlson8d018622007-12-20 20:05:44 -08008834 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008835 else
Matt Carlson8d018622007-12-20 20:05:44 -08008836 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008837
8838 if (netif_running(dev)) {
Michael Chan944d9802005-05-29 14:57:48 -07008839 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chanb9ec6c12006-07-25 16:37:27 -07008840 err = tg3_restart_hw(tp, 1);
8841 if (!err)
8842 tg3_netif_start(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008843 }
David S. Millerf47c11e2005-06-24 20:18:35 -07008844
8845 tg3_full_unlock(tp);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008846
Michael Chanb9ec6c12006-07-25 16:37:27 -07008847 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008848}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008849
Linus Torvalds1da177e2005-04-16 15:20:36 -07008850static u32 tg3_get_rx_csum(struct net_device *dev)
8851{
8852 struct tg3 *tp = netdev_priv(dev);
8853 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8854}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008855
Linus Torvalds1da177e2005-04-16 15:20:36 -07008856static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8857{
8858 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008859
Linus Torvalds1da177e2005-04-16 15:20:36 -07008860 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8861 if (data != 0)
8862 return -EINVAL;
8863 return 0;
8864 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008865
David S. Millerf47c11e2005-06-24 20:18:35 -07008866 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008867 if (data)
8868 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8869 else
8870 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
David S. Millerf47c11e2005-06-24 20:18:35 -07008871 spin_unlock_bh(&tp->lock);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008872
Linus Torvalds1da177e2005-04-16 15:20:36 -07008873 return 0;
8874}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008875
Linus Torvalds1da177e2005-04-16 15:20:36 -07008876static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8877{
8878 struct tg3 *tp = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008879
Linus Torvalds1da177e2005-04-16 15:20:36 -07008880 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8881 if (data != 0)
8882 return -EINVAL;
8883 return 0;
8884 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008885
Michael Chanaf36e6b2006-03-23 01:28:06 -08008886 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07008887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07008888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chan6460d942007-07-14 19:07:52 -07008890 ethtool_op_set_tx_ipv6_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008891 else
Michael Chan9c27dbd2006-03-20 22:28:27 -08008892 ethtool_op_set_tx_csum(dev, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008893
8894 return 0;
8895}
8896
Jeff Garzikb9f2c042007-10-03 18:07:32 -07008897static int tg3_get_sset_count (struct net_device *dev, int sset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008898{
Jeff Garzikb9f2c042007-10-03 18:07:32 -07008899 switch (sset) {
8900 case ETH_SS_TEST:
8901 return TG3_NUM_TEST;
8902 case ETH_SS_STATS:
8903 return TG3_NUM_STATS;
8904 default:
8905 return -EOPNOTSUPP;
8906 }
Michael Chan4cafd3f2005-05-29 14:56:34 -07008907}
8908
Linus Torvalds1da177e2005-04-16 15:20:36 -07008909static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8910{
8911 switch (stringset) {
8912 case ETH_SS_STATS:
8913 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8914 break;
Michael Chan4cafd3f2005-05-29 14:56:34 -07008915 case ETH_SS_TEST:
8916 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8917 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008918 default:
8919 WARN_ON(1); /* we need a WARN() */
8920 break;
8921 }
8922}
8923
Michael Chan4009a932005-09-05 17:52:54 -07008924static int tg3_phys_id(struct net_device *dev, u32 data)
8925{
8926 struct tg3 *tp = netdev_priv(dev);
8927 int i;
8928
8929 if (!netif_running(tp->dev))
8930 return -EAGAIN;
8931
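	/* data is the blink time in seconds; 0 means "blink until
	 * interrupted", approximated here by UINT_MAX / 2 seconds.
	 */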
8932 if (data == 0)
Stephen Hemminger759afc32008-02-23 19:51:59 -08008933 data = UINT_MAX / 2;
Michael Chan4009a932005-09-05 17:52:54 -07008934
8935 for (i = 0; i < (data * 2); i++) {
8936 if ((i % 2) == 0)
8937 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8938 LED_CTRL_1000MBPS_ON |
8939 LED_CTRL_100MBPS_ON |
8940 LED_CTRL_10MBPS_ON |
8941 LED_CTRL_TRAFFIC_OVERRIDE |
8942 LED_CTRL_TRAFFIC_BLINK |
8943 LED_CTRL_TRAFFIC_LED);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04008944
Michael Chan4009a932005-09-05 17:52:54 -07008945 else
8946 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8947 LED_CTRL_TRAFFIC_OVERRIDE);
8948
8949 if (msleep_interruptible(500))
8950 break;
8951 }
8952 tw32(MAC_LED_CTRL, tp->led_ctrl);
8953 return 0;
8954}
8955
Linus Torvalds1da177e2005-04-16 15:20:36 -07008956static void tg3_get_ethtool_stats (struct net_device *dev,
8957 struct ethtool_stats *estats, u64 *tmp_stats)
8958{
8959 struct tg3 *tp = netdev_priv(dev);
8960 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8961}
8962
Michael Chan566f86a2005-05-29 14:56:58 -07008963#define NVRAM_TEST_SIZE 0x100
Matt Carlsona5767de2007-11-12 21:10:58 -08008964#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8965#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8966#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
Michael Chanb16250e2006-09-27 16:10:14 -07008967#define NVRAM_SELFBOOT_HW_SIZE 0x20
8968#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
Michael Chan566f86a2005-05-29 14:56:58 -07008969
8970static int tg3_test_nvram(struct tg3 *tp)
8971{
Al Virob9fc7dc2007-12-17 22:59:57 -08008972 u32 csum, magic;
8973 __le32 *buf;
Andy Gospodarekab0049b2007-09-06 20:42:14 +01008974 int i, j, k, err = 0, size;
Michael Chan566f86a2005-05-29 14:56:58 -07008975
Michael Chan18201802006-03-20 22:29:15 -08008976 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08008977 return -EIO;
8978
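	/* Choose how many bytes to verify from the NVRAM image type:
	 * legacy images, self-boot format 1 (size depends on its revision),
	 * or hardware self-boot images.
	 */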
Michael Chan1b277772006-03-20 22:27:48 -08008979 if (magic == TG3_EEPROM_MAGIC)
8980 size = NVRAM_TEST_SIZE;
Michael Chanb16250e2006-09-27 16:10:14 -07008981 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
Matt Carlsona5767de2007-11-12 21:10:58 -08008982 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8983 TG3_EEPROM_SB_FORMAT_1) {
8984 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8985 case TG3_EEPROM_SB_REVISION_0:
8986 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
8987 break;
8988 case TG3_EEPROM_SB_REVISION_2:
8989 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
8990 break;
8991 case TG3_EEPROM_SB_REVISION_3:
8992 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
8993 break;
8994 default:
8995 return 0;
8996 }
8997 } else
Michael Chan1b277772006-03-20 22:27:48 -08008998 return 0;
Michael Chanb16250e2006-09-27 16:10:14 -07008999 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9000 size = NVRAM_SELFBOOT_HW_SIZE;
9001 else
Michael Chan1b277772006-03-20 22:27:48 -08009002 return -EIO;
9003
9004 buf = kmalloc(size, GFP_KERNEL);
Michael Chan566f86a2005-05-29 14:56:58 -07009005 if (buf == NULL)
9006 return -ENOMEM;
9007
Michael Chan1b277772006-03-20 22:27:48 -08009008 err = -EIO;
9009 for (i = 0, j = 0; i < size; i += 4, j++) {
Al Virob9fc7dc2007-12-17 22:59:57 -08009010 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
Michael Chan566f86a2005-05-29 14:56:58 -07009011 break;
Michael Chan566f86a2005-05-29 14:56:58 -07009012 }
Michael Chan1b277772006-03-20 22:27:48 -08009013 if (i < size)
Michael Chan566f86a2005-05-29 14:56:58 -07009014 goto out;
9015
Michael Chan1b277772006-03-20 22:27:48 -08009016 /* Selfboot format */
Al Virob9fc7dc2007-12-17 22:59:57 -08009017 magic = swab32(le32_to_cpu(buf[0]));
9018 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009019 TG3_EEPROM_MAGIC_FW) {
Michael Chan1b277772006-03-20 22:27:48 -08009020 u8 *buf8 = (u8 *) buf, csum8 = 0;
9021
Al Virob9fc7dc2007-12-17 22:59:57 -08009022 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
Matt Carlsona5767de2007-11-12 21:10:58 -08009023 TG3_EEPROM_SB_REVISION_2) {
9024 /* For rev 2, the csum doesn't include the MBA. */
9025 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9026 csum8 += buf8[i];
9027 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9028 csum8 += buf8[i];
9029 } else {
9030 for (i = 0; i < size; i++)
9031 csum8 += buf8[i];
9032 }
Michael Chan1b277772006-03-20 22:27:48 -08009033
Adrian Bunkad96b482006-04-05 22:21:04 -07009034 if (csum8 == 0) {
9035 err = 0;
9036 goto out;
9037 }
9038
9039 err = -EIO;
9040 goto out;
Michael Chan1b277772006-03-20 22:27:48 -08009041 }
Michael Chan566f86a2005-05-29 14:56:58 -07009042
Al Virob9fc7dc2007-12-17 22:59:57 -08009043 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
Michael Chanb16250e2006-09-27 16:10:14 -07009044 TG3_EEPROM_MAGIC_HW) {
9045 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9046 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9047 u8 *buf8 = (u8 *) buf;
Michael Chanb16250e2006-09-27 16:10:14 -07009048
9049 /* Separate the parity bits and the data bytes. */
9050 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9051 if ((i == 0) || (i == 8)) {
9052 int l;
9053 u8 msk;
9054
9055 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9056 parity[k++] = buf8[i] & msk;
9057 i++;
9058 }
9059 else if (i == 16) {
9060 int l;
9061 u8 msk;
9062
9063 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9064 parity[k++] = buf8[i] & msk;
9065 i++;
9066
9067 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9068 parity[k++] = buf8[i] & msk;
9069 i++;
9070 }
9071 data[j++] = buf8[i];
9072 }
9073
9074 err = -EIO;
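		/* Each byte must have odd parity overall: a byte with an odd
		 * number of 1 bits must have a clear parity bit, a byte with
		 * an even count must have it set.
		 */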
9075 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9076 u8 hw8 = hweight8(data[i]);
9077
9078 if ((hw8 & 0x1) && parity[i])
9079 goto out;
9080 else if (!(hw8 & 0x1) && !parity[i])
9081 goto out;
9082 }
9083 err = 0;
9084 goto out;
9085 }
9086
Michael Chan566f86a2005-05-29 14:56:58 -07009087 /* Bootstrap checksum at offset 0x10 */
9088 csum = calc_crc((unsigned char *) buf, 0x10);
Al Virob9fc7dc2007-12-17 22:59:57 -08009089 if(csum != le32_to_cpu(buf[0x10/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009090 goto out;
9091
9092 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9093 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
Al Virob9fc7dc2007-12-17 22:59:57 -08009094 if (csum != le32_to_cpu(buf[0xfc/4]))
Michael Chan566f86a2005-05-29 14:56:58 -07009095 goto out;
9096
9097 err = 0;
9098
9099out:
9100 kfree(buf);
9101 return err;
9102}
9103
Michael Chanca430072005-05-29 14:57:23 -07009104#define TG3_SERDES_TIMEOUT_SEC 2
9105#define TG3_COPPER_TIMEOUT_SEC 6
9106
9107static int tg3_test_link(struct tg3 *tp)
9108{
9109 int i, max;
9110
9111 if (!netif_running(tp->dev))
9112 return -ENODEV;
9113
Michael Chan4c987482005-09-05 17:52:38 -07009114 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Michael Chanca430072005-05-29 14:57:23 -07009115 max = TG3_SERDES_TIMEOUT_SEC;
9116 else
9117 max = TG3_COPPER_TIMEOUT_SEC;
9118
9119 for (i = 0; i < max; i++) {
9120 if (netif_carrier_ok(tp->dev))
9121 return 0;
9122
9123 if (msleep_interruptible(1000))
9124 break;
9125 }
9126
9127 return -EIO;
9128}
9129
Michael Chana71116d2005-05-29 14:58:11 -07009130/* Only test the commonly used registers */
David S. Miller30ca3e32006-03-20 23:02:36 -08009131static int tg3_test_registers(struct tg3 *tp)
Michael Chana71116d2005-05-29 14:58:11 -07009132{
Michael Chanb16250e2006-09-27 16:10:14 -07009133 int i, is_5705, is_5750;
Michael Chana71116d2005-05-29 14:58:11 -07009134 u32 offset, read_mask, write_mask, val, save_val, read_val;
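	/* read_mask selects read-only bits whose value must survive writes;
	 * write_mask selects read/write bits that must accept both 0 and 1.
	 */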
9135 static struct {
9136 u16 offset;
9137 u16 flags;
9138#define TG3_FL_5705 0x1
9139#define TG3_FL_NOT_5705 0x2
9140#define TG3_FL_NOT_5788 0x4
Michael Chanb16250e2006-09-27 16:10:14 -07009141#define TG3_FL_NOT_5750 0x8
Michael Chana71116d2005-05-29 14:58:11 -07009142 u32 read_mask;
9143 u32 write_mask;
9144 } reg_tbl[] = {
9145 /* MAC Control Registers */
9146 { MAC_MODE, TG3_FL_NOT_5705,
9147 0x00000000, 0x00ef6f8c },
9148 { MAC_MODE, TG3_FL_5705,
9149 0x00000000, 0x01ef6b8c },
9150 { MAC_STATUS, TG3_FL_NOT_5705,
9151 0x03800107, 0x00000000 },
9152 { MAC_STATUS, TG3_FL_5705,
9153 0x03800100, 0x00000000 },
9154 { MAC_ADDR_0_HIGH, 0x0000,
9155 0x00000000, 0x0000ffff },
9156 { MAC_ADDR_0_LOW, 0x0000,
9157 0x00000000, 0xffffffff },
9158 { MAC_RX_MTU_SIZE, 0x0000,
9159 0x00000000, 0x0000ffff },
9160 { MAC_TX_MODE, 0x0000,
9161 0x00000000, 0x00000070 },
9162 { MAC_TX_LENGTHS, 0x0000,
9163 0x00000000, 0x00003fff },
9164 { MAC_RX_MODE, TG3_FL_NOT_5705,
9165 0x00000000, 0x000007fc },
9166 { MAC_RX_MODE, TG3_FL_5705,
9167 0x00000000, 0x000007dc },
9168 { MAC_HASH_REG_0, 0x0000,
9169 0x00000000, 0xffffffff },
9170 { MAC_HASH_REG_1, 0x0000,
9171 0x00000000, 0xffffffff },
9172 { MAC_HASH_REG_2, 0x0000,
9173 0x00000000, 0xffffffff },
9174 { MAC_HASH_REG_3, 0x0000,
9175 0x00000000, 0xffffffff },
9176
9177 /* Receive Data and Receive BD Initiator Control Registers. */
9178 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9179 0x00000000, 0xffffffff },
9180 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9181 0x00000000, 0xffffffff },
9182 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9183 0x00000000, 0x00000003 },
9184 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9185 0x00000000, 0xffffffff },
9186 { RCVDBDI_STD_BD+0, 0x0000,
9187 0x00000000, 0xffffffff },
9188 { RCVDBDI_STD_BD+4, 0x0000,
9189 0x00000000, 0xffffffff },
9190 { RCVDBDI_STD_BD+8, 0x0000,
9191 0x00000000, 0xffff0002 },
9192 { RCVDBDI_STD_BD+0xc, 0x0000,
9193 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009194
Michael Chana71116d2005-05-29 14:58:11 -07009195 /* Receive BD Initiator Control Registers. */
9196 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9197 0x00000000, 0xffffffff },
9198 { RCVBDI_STD_THRESH, TG3_FL_5705,
9199 0x00000000, 0x000003ff },
9200 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9201 0x00000000, 0xffffffff },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009202
Michael Chana71116d2005-05-29 14:58:11 -07009203 /* Host Coalescing Control Registers. */
9204 { HOSTCC_MODE, TG3_FL_NOT_5705,
9205 0x00000000, 0x00000004 },
9206 { HOSTCC_MODE, TG3_FL_5705,
9207 0x00000000, 0x000000f6 },
9208 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9209 0x00000000, 0xffffffff },
9210 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9211 0x00000000, 0x000003ff },
9212 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9213 0x00000000, 0xffffffff },
9214 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9215 0x00000000, 0x000003ff },
9216 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9217 0x00000000, 0xffffffff },
9218 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9219 0x00000000, 0x000000ff },
9220 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9221 0x00000000, 0xffffffff },
9222 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9223 0x00000000, 0x000000ff },
9224 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9225 0x00000000, 0xffffffff },
9226 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9227 0x00000000, 0xffffffff },
9228 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9229 0x00000000, 0xffffffff },
9230 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9231 0x00000000, 0x000000ff },
9232 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9233 0x00000000, 0xffffffff },
9234 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9235 0x00000000, 0x000000ff },
9236 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9237 0x00000000, 0xffffffff },
9238 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9239 0x00000000, 0xffffffff },
9240 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9241 0x00000000, 0xffffffff },
9242 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9243 0x00000000, 0xffffffff },
9244 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9245 0x00000000, 0xffffffff },
9246 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9247 0xffffffff, 0x00000000 },
9248 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9249 0xffffffff, 0x00000000 },
9250
9251 /* Buffer Manager Control Registers. */
Michael Chanb16250e2006-09-27 16:10:14 -07009252 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009253 0x00000000, 0x007fff80 },
Michael Chanb16250e2006-09-27 16:10:14 -07009254 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
Michael Chana71116d2005-05-29 14:58:11 -07009255 0x00000000, 0x007fffff },
9256 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9257 0x00000000, 0x0000003f },
9258 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9259 0x00000000, 0x000001ff },
9260 { BUFMGR_MB_HIGH_WATER, 0x0000,
9261 0x00000000, 0x000001ff },
9262 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9263 0xffffffff, 0x00000000 },
9264 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9265 0xffffffff, 0x00000000 },
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009266
Michael Chana71116d2005-05-29 14:58:11 -07009267 /* Mailbox Registers */
9268 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9269 0x00000000, 0x000001ff },
9270 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9271 0x00000000, 0x000001ff },
9272 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9273 0x00000000, 0x000007ff },
9274 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9275 0x00000000, 0x000001ff },
9276
9277 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9278 };
9279
Michael Chanb16250e2006-09-27 16:10:14 -07009280 is_5705 = is_5750 = 0;
9281 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chana71116d2005-05-29 14:58:11 -07009282 is_5705 = 1;
Michael Chanb16250e2006-09-27 16:10:14 -07009283 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9284 is_5750 = 1;
9285 }
Michael Chana71116d2005-05-29 14:58:11 -07009286
9287 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9288 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9289 continue;
9290
9291 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9292 continue;
9293
9294 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9295 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9296 continue;
9297
Michael Chanb16250e2006-09-27 16:10:14 -07009298 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9299 continue;
9300
Michael Chana71116d2005-05-29 14:58:11 -07009301 offset = (u32) reg_tbl[i].offset;
9302 read_mask = reg_tbl[i].read_mask;
9303 write_mask = reg_tbl[i].write_mask;
9304
9305 /* Save the original register content */
9306 save_val = tr32(offset);
9307
9308 /* Determine the read-only value. */
9309 read_val = save_val & read_mask;
9310
9311 /* Write zero to the register, then make sure the read-only bits
9312 * are not changed and the read/write bits are all zeros.
9313 */
9314 tw32(offset, 0);
9315
9316 val = tr32(offset);
9317
9318 /* Test the read-only and read/write bits. */
9319 if (((val & read_mask) != read_val) || (val & write_mask))
9320 goto out;
9321
9322 /* Write ones to all the bits defined by RdMask and WrMask, then
9323 * make sure the read-only bits are not changed and the
9324 * read/write bits are all ones.
9325 */
9326 tw32(offset, read_mask | write_mask);
9327
9328 val = tr32(offset);
9329
9330 /* Test the read-only bits. */
9331 if ((val & read_mask) != read_val)
9332 goto out;
9333
9334 /* Test the read/write bits. */
9335 if ((val & write_mask) != write_mask)
9336 goto out;
9337
9338 tw32(offset, save_val);
9339 }
9340
9341 return 0;
9342
9343out:
Michael Chan9f88f292006-12-07 00:22:54 -08009344 if (netif_msg_hw(tp))
9345 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9346 offset);
Michael Chana71116d2005-05-29 14:58:11 -07009347 tw32(offset, save_val);
9348 return -EIO;
9349}
9350
Michael Chan7942e1d2005-05-29 14:58:36 -07009351static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9352{
Arjan van de Venf71e1302006-03-03 21:33:57 -05009353 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
Michael Chan7942e1d2005-05-29 14:58:36 -07009354 int i;
9355 u32 j;
9356
Alejandro Martinez Ruize9edda62007-10-15 03:37:43 +02009357 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
Michael Chan7942e1d2005-05-29 14:58:36 -07009358 for (j = 0; j < len; j += 4) {
9359 u32 val;
9360
9361 tg3_write_mem(tp, offset + j, test_pattern[i]);
9362 tg3_read_mem(tp, offset + j, &val);
9363 if (val != test_pattern[i])
9364 return -EIO;
9365 }
9366 }
9367 return 0;
9368}
9369
9370static int tg3_test_memory(struct tg3 *tp)
9371{
9372 static struct mem_entry {
9373 u32 offset;
9374 u32 len;
9375 } mem_tbl_570x[] = {
Michael Chan38690192005-12-19 16:27:28 -08009376 { 0x00000000, 0x00b50},
Michael Chan7942e1d2005-05-29 14:58:36 -07009377 { 0x00002000, 0x1c000},
9378 { 0xffffffff, 0x00000}
9379 }, mem_tbl_5705[] = {
9380 { 0x00000100, 0x0000c},
9381 { 0x00000200, 0x00008},
Michael Chan7942e1d2005-05-29 14:58:36 -07009382 { 0x00004000, 0x00800},
9383 { 0x00006000, 0x01000},
9384 { 0x00008000, 0x02000},
9385 { 0x00010000, 0x0e000},
9386 { 0xffffffff, 0x00000}
Michael Chan79f4d132006-03-20 22:28:57 -08009387 }, mem_tbl_5755[] = {
9388 { 0x00000200, 0x00008},
9389 { 0x00004000, 0x00800},
9390 { 0x00006000, 0x00800},
9391 { 0x00008000, 0x02000},
9392 { 0x00010000, 0x0c000},
9393 { 0xffffffff, 0x00000}
Michael Chanb16250e2006-09-27 16:10:14 -07009394 }, mem_tbl_5906[] = {
9395 { 0x00000200, 0x00008},
9396 { 0x00004000, 0x00400},
9397 { 0x00006000, 0x00400},
9398 { 0x00008000, 0x01000},
9399 { 0x00010000, 0x01000},
9400 { 0xffffffff, 0x00000}
Michael Chan7942e1d2005-05-29 14:58:36 -07009401 };
9402 struct mem_entry *mem_tbl;
9403 int err = 0;
9404 int i;
9405
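	/* Pick the internal-memory map for this chip family; the testable
	 * regions and their sizes differ between generations.
	 */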
Michael Chan79f4d132006-03-20 22:28:57 -08009406 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
Michael Chanaf36e6b2006-03-23 01:28:06 -08009407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -07009408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -07009409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
Michael Chan79f4d132006-03-20 22:28:57 -08009411 mem_tbl = mem_tbl_5755;
Michael Chanb16250e2006-09-27 16:10:14 -07009412 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9413 mem_tbl = mem_tbl_5906;
Michael Chan79f4d132006-03-20 22:28:57 -08009414 else
9415 mem_tbl = mem_tbl_5705;
9416 } else
Michael Chan7942e1d2005-05-29 14:58:36 -07009417 mem_tbl = mem_tbl_570x;
9418
9419 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9420 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9421 mem_tbl[i].len)) != 0)
9422 break;
9423 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009424
Michael Chan7942e1d2005-05-29 14:58:36 -07009425 return err;
9426}
9427
Michael Chan9f40dea2005-09-05 17:53:06 -07009428#define TG3_MAC_LOOPBACK 0
9429#define TG3_PHY_LOOPBACK 1
9430
9431static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
Michael Chanc76949a2005-05-29 14:58:59 -07009432{
Michael Chan9f40dea2005-09-05 17:53:06 -07009433 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
Michael Chanc76949a2005-05-29 14:58:59 -07009434 u32 desc_idx;
9435 struct sk_buff *skb, *rx_skb;
9436 u8 *tx_data;
9437 dma_addr_t map;
9438 int num_pkts, tx_len, rx_len, i, err;
9439 struct tg3_rx_buffer_desc *desc;
9440
Michael Chan9f40dea2005-09-05 17:53:06 -07009441 if (loopback_mode == TG3_MAC_LOOPBACK) {
Michael Chanc94e3942005-09-27 12:12:42 -07009442 /* HW errata - mac loopback fails in some cases on 5780.
9443 * Normal traffic and PHY loopback are not affected by
9444 * errata.
9445 */
9446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9447 return 0;
9448
Michael Chan9f40dea2005-09-05 17:53:06 -07009449 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009450 MAC_MODE_PORT_INT_LPBACK;
9451 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9452 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chan3f7045c2006-09-27 16:02:29 -07009453 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9454 mac_mode |= MAC_MODE_PORT_MODE_MII;
9455 else
9456 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chan9f40dea2005-09-05 17:53:06 -07009457 tw32(MAC_MODE, mac_mode);
9458 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
Michael Chan3f7045c2006-09-27 16:02:29 -07009459 u32 val;
9460
Michael Chanb16250e2006-09-27 16:10:14 -07009461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9462 u32 phytest;
9463
9464 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9465 u32 phy;
9466
9467 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9468 phytest | MII_TG3_EPHY_SHADOW_EN);
9469 if (!tg3_readphy(tp, 0x1b, &phy))
9470 tg3_writephy(tp, 0x1b, phy & ~0x20);
Michael Chanb16250e2006-09-27 16:10:14 -07009471 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9472 }
Michael Chan5d64ad32006-12-07 00:19:40 -08009473 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9474 } else
9475 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
Michael Chan3f7045c2006-09-27 16:02:29 -07009476
Matt Carlson9ef8ca92007-07-11 19:48:29 -07009477 tg3_phy_toggle_automdix(tp, 0);
9478
Michael Chan3f7045c2006-09-27 16:02:29 -07009479 tg3_writephy(tp, MII_BMCR, val);
Michael Chanc94e3942005-09-27 12:12:42 -07009480 udelay(40);
Michael Chan5d64ad32006-12-07 00:19:40 -08009481
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009482 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
Michael Chan5d64ad32006-12-07 00:19:40 -08009483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chanb16250e2006-09-27 16:10:14 -07009484 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
Michael Chan5d64ad32006-12-07 00:19:40 -08009485 mac_mode |= MAC_MODE_PORT_MODE_MII;
9486 } else
9487 mac_mode |= MAC_MODE_PORT_MODE_GMII;
Michael Chanb16250e2006-09-27 16:10:14 -07009488
Michael Chanc94e3942005-09-27 12:12:42 -07009489 /* reset to prevent losing 1st rx packet intermittently */
9490 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9491 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9492 udelay(10);
9493 tw32_f(MAC_RX_MODE, tp->rx_mode);
9494 }
Matt Carlsone8f3f6c2007-07-11 19:47:55 -07009495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9496 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9497 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9498 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9499 mac_mode |= MAC_MODE_LINK_POLARITY;
Michael Chanff18ff02006-03-27 23:17:27 -08009500 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9501 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9502 }
Michael Chan9f40dea2005-09-05 17:53:06 -07009503 tw32(MAC_MODE, mac_mode);
Michael Chan9f40dea2005-09-05 17:53:06 -07009504 }
9505 else
9506 return -EINVAL;
Michael Chanc76949a2005-05-29 14:58:59 -07009507
9508 err = -EIO;
9509
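	/* Build one 1514-byte test frame addressed to our own MAC, padded
	 * with an incrementing byte pattern that is verified on receive.
	 */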
Michael Chanc76949a2005-05-29 14:58:59 -07009510 tx_len = 1514;
David S. Millera20e9c62006-07-31 22:38:16 -07009511 skb = netdev_alloc_skb(tp->dev, tx_len);
Jesper Juhla50bb7b2006-05-09 23:14:35 -07009512 if (!skb)
9513 return -ENOMEM;
9514
Michael Chanc76949a2005-05-29 14:58:59 -07009515 tx_data = skb_put(skb, tx_len);
9516 memcpy(tx_data, tp->dev->dev_addr, 6);
9517 memset(tx_data + 6, 0x0, 8);
9518
9519 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9520
9521 for (i = 14; i < tx_len; i++)
9522 tx_data[i] = (u8) (i & 0xff);
9523
9524 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9525
9526 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9527 HOSTCC_MODE_NOW);
9528
9529 udelay(10);
9530
9531 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9532
Michael Chanc76949a2005-05-29 14:58:59 -07009533 num_pkts = 0;
9534
Michael Chan9f40dea2005-09-05 17:53:06 -07009535 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
Michael Chanc76949a2005-05-29 14:58:59 -07009536
Michael Chan9f40dea2005-09-05 17:53:06 -07009537 tp->tx_prod++;
Michael Chanc76949a2005-05-29 14:58:59 -07009538 num_pkts++;
9539
Michael Chan9f40dea2005-09-05 17:53:06 -07009540 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9541 tp->tx_prod);
Michael Chan09ee9292005-08-09 20:17:00 -07009542 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
Michael Chanc76949a2005-05-29 14:58:59 -07009543
9544 udelay(10);
9545
Michael Chan3f7045c2006-09-27 16:02:29 -07009546 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9547 for (i = 0; i < 25; i++) {
Michael Chanc76949a2005-05-29 14:58:59 -07009548 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9549 HOSTCC_MODE_NOW);
9550
9551 udelay(10);
9552
9553 tx_idx = tp->hw_status->idx[0].tx_consumer;
9554 rx_idx = tp->hw_status->idx[0].rx_producer;
Michael Chan9f40dea2005-09-05 17:53:06 -07009555 if ((tx_idx == tp->tx_prod) &&
Michael Chanc76949a2005-05-29 14:58:59 -07009556 (rx_idx == (rx_start_idx + num_pkts)))
9557 break;
9558 }
9559
9560 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9561 dev_kfree_skb(skb);
9562
Michael Chan9f40dea2005-09-05 17:53:06 -07009563 if (tx_idx != tp->tx_prod)
Michael Chanc76949a2005-05-29 14:58:59 -07009564 goto out;
9565
9566 if (rx_idx != rx_start_idx + num_pkts)
9567 goto out;
9568
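	/* Validate the received descriptor: standard ring, no errors, a
	 * hardware-reported length (less the trailing 4 bytes) matching what
	 * was sent, and an intact payload pattern.
	 */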
9569 desc = &tp->rx_rcb[rx_start_idx];
9570 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9571 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9572 if (opaque_key != RXD_OPAQUE_RING_STD)
9573 goto out;
9574
9575 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9576 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9577 goto out;
9578
9579 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9580 if (rx_len != tx_len)
9581 goto out;
9582
9583 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9584
9585 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9586 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9587
9588 for (i = 14; i < tx_len; i++) {
9589 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9590 goto out;
9591 }
9592 err = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009593
Michael Chanc76949a2005-05-29 14:58:59 -07009594 /* tg3_free_rings will unmap and free the rx_skb */
9595out:
9596 return err;
9597}
9598
Michael Chan9f40dea2005-09-05 17:53:06 -07009599#define TG3_MAC_LOOPBACK_FAILED 1
9600#define TG3_PHY_LOOPBACK_FAILED 2
9601#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9602 TG3_PHY_LOOPBACK_FAILED)
9603
9604static int tg3_test_loopback(struct tg3 *tp)
9605{
9606 int err = 0;
Matt Carlson9936bcf2007-10-10 18:03:07 -07009607 u32 cpmuctrl = 0;
Michael Chan9f40dea2005-09-05 17:53:06 -07009608
9609 if (!netif_running(tp->dev))
9610 return TG3_LOOPBACK_FAILED;
9611
Michael Chanb9ec6c12006-07-25 16:37:27 -07009612 err = tg3_reset_hw(tp, 1);
9613 if (err)
9614 return TG3_LOOPBACK_FAILED;
Michael Chan9f40dea2005-09-05 17:53:06 -07009615
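	/* On 5784/5761 the CPMU's link-aware power modes can interfere with
	 * loopback, so acquire the hardware mutex and disable them for the
	 * duration of the test (both are restored further down).
	 */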
Matt Carlsonb2a5c192008-04-03 21:44:44 -07009616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009618 int i;
9619 u32 status;
9620
9621 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9622
9623 /* Wait for up to 40 microseconds to acquire lock. */
9624 for (i = 0; i < 4; i++) {
9625 status = tr32(TG3_CPMU_MUTEX_GNT);
9626 if (status == CPMU_MUTEX_GNT_DRIVER)
9627 break;
9628 udelay(10);
9629 }
9630
9631 if (status != CPMU_MUTEX_GNT_DRIVER)
9632 return TG3_LOOPBACK_FAILED;
9633
Matt Carlsonb2a5c192008-04-03 21:44:44 -07009634 /* Turn off link-based power management. */
Matt Carlsone8750932007-11-12 21:11:51 -08009635 cpmuctrl = tr32(TG3_CPMU_CTRL);
Matt Carlson109115e2008-05-02 16:48:59 -07009636 tw32(TG3_CPMU_CTRL,
9637 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9638 CPMU_CTRL_LINK_AWARE_MODE));
Matt Carlson9936bcf2007-10-10 18:03:07 -07009639 }
9640
Michael Chan9f40dea2005-09-05 17:53:06 -07009641 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9642 err |= TG3_MAC_LOOPBACK_FAILED;
Matt Carlson9936bcf2007-10-10 18:03:07 -07009643
Matt Carlsonb2a5c192008-04-03 21:44:44 -07009644 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9645 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Matt Carlson9936bcf2007-10-10 18:03:07 -07009646 tw32(TG3_CPMU_CTRL, cpmuctrl);
9647
9648 /* Release the mutex */
9649 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9650 }
9651
Michael Chan9f40dea2005-09-05 17:53:06 -07009652 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9653 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9654 err |= TG3_PHY_LOOPBACK_FAILED;
9655 }
9656
9657 return err;
9658}
9659
Michael Chan4cafd3f2005-05-29 14:56:34 -07009660static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9661 u64 *data)
9662{
Michael Chan566f86a2005-05-29 14:56:58 -07009663 struct tg3 *tp = netdev_priv(dev);
9664
Michael Chanbc1c7562006-03-20 17:48:03 -08009665 if (tp->link_config.phy_is_low_power)
9666 tg3_set_power_state(tp, PCI_D0);
9667
Michael Chan566f86a2005-05-29 14:56:58 -07009668 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9669
9670 if (tg3_test_nvram(tp) != 0) {
9671 etest->flags |= ETH_TEST_FL_FAILED;
9672 data[0] = 1;
9673 }
Michael Chanca430072005-05-29 14:57:23 -07009674 if (tg3_test_link(tp) != 0) {
9675 etest->flags |= ETH_TEST_FL_FAILED;
9676 data[1] = 1;
9677 }
Michael Chana71116d2005-05-29 14:58:11 -07009678 if (etest->flags & ETH_TEST_FL_OFFLINE) {
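		/* Offline tests take the chip down: halt it and its on-chip
		 * CPUs, run the register, memory, loopback and interrupt
		 * tests, then restart the hardware if the interface was up.
		 */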
Michael Chanec41c7d2006-01-17 02:40:55 -08009679 int err, irq_sync = 0;
Michael Chana71116d2005-05-29 14:58:11 -07009680
Michael Chanbbe832c2005-06-24 20:20:04 -07009681 if (netif_running(dev)) {
9682 tg3_netif_stop(tp);
9683 irq_sync = 1;
9684 }
9685
9686 tg3_full_lock(tp, irq_sync);
Michael Chana71116d2005-05-29 14:58:11 -07009687
9688 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
Michael Chanec41c7d2006-01-17 02:40:55 -08009689 err = tg3_nvram_lock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009690 tg3_halt_cpu(tp, RX_CPU_BASE);
9691 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9692 tg3_halt_cpu(tp, TX_CPU_BASE);
Michael Chanec41c7d2006-01-17 02:40:55 -08009693 if (!err)
9694 tg3_nvram_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009695
Michael Chand9ab5ad2006-03-20 22:27:35 -08009696 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9697 tg3_phy_reset(tp);
9698
Michael Chana71116d2005-05-29 14:58:11 -07009699 if (tg3_test_registers(tp) != 0) {
9700 etest->flags |= ETH_TEST_FL_FAILED;
9701 data[2] = 1;
9702 }
Michael Chan7942e1d2005-05-29 14:58:36 -07009703 if (tg3_test_memory(tp) != 0) {
9704 etest->flags |= ETH_TEST_FL_FAILED;
9705 data[3] = 1;
9706 }
Michael Chan9f40dea2005-09-05 17:53:06 -07009707 if ((data[4] = tg3_test_loopback(tp)) != 0)
Michael Chanc76949a2005-05-29 14:58:59 -07009708 etest->flags |= ETH_TEST_FL_FAILED;
Michael Chana71116d2005-05-29 14:58:11 -07009709
David S. Millerf47c11e2005-06-24 20:18:35 -07009710 tg3_full_unlock(tp);
9711
Michael Chand4bc3922005-05-29 14:59:20 -07009712 if (tg3_test_interrupt(tp) != 0) {
9713 etest->flags |= ETH_TEST_FL_FAILED;
9714 data[5] = 1;
9715 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009716
9717 tg3_full_lock(tp, 0);
Michael Chand4bc3922005-05-29 14:59:20 -07009718
Michael Chana71116d2005-05-29 14:58:11 -07009719 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9720 if (netif_running(dev)) {
9721 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -07009722 if (!tg3_restart_hw(tp, 1))
9723 tg3_netif_start(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009724 }
David S. Millerf47c11e2005-06-24 20:18:35 -07009725
9726 tg3_full_unlock(tp);
Michael Chana71116d2005-05-29 14:58:11 -07009727 }
Michael Chanbc1c7562006-03-20 17:48:03 -08009728 if (tp->link_config.phy_is_low_power)
9729 tg3_set_power_state(tp, PCI_D3hot);
9730
Michael Chan4cafd3f2005-05-29 14:56:34 -07009731}
9732
Linus Torvalds1da177e2005-04-16 15:20:36 -07009733static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9734{
9735 struct mii_ioctl_data *data = if_mii(ifr);
9736 struct tg3 *tp = netdev_priv(dev);
9737 int err;
9738
9739 switch(cmd) {
9740 case SIOCGMIIPHY:
9741 data->phy_id = PHY_ADDR;
9742
9743 /* fallthru */
9744 case SIOCGMIIREG: {
9745 u32 mii_regval;
9746
9747 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9748 break; /* We have no PHY */
9749
Michael Chanbc1c7562006-03-20 17:48:03 -08009750 if (tp->link_config.phy_is_low_power)
9751 return -EAGAIN;
9752
David S. Millerf47c11e2005-06-24 20:18:35 -07009753 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009754 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
David S. Millerf47c11e2005-06-24 20:18:35 -07009755 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009756
9757 data->val_out = mii_regval;
9758
9759 return err;
9760 }
9761
9762 case SIOCSMIIREG:
9763 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9764 break; /* We have no PHY */
9765
9766 if (!capable(CAP_NET_ADMIN))
9767 return -EPERM;
9768
Michael Chanbc1c7562006-03-20 17:48:03 -08009769 if (tp->link_config.phy_is_low_power)
9770 return -EAGAIN;
9771
David S. Millerf47c11e2005-06-24 20:18:35 -07009772 spin_lock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009773 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
David S. Millerf47c11e2005-06-24 20:18:35 -07009774 spin_unlock_bh(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009775
9776 return err;
9777
9778 default:
9779 /* do nothing */
9780 break;
9781 }
9782 return -EOPNOTSUPP;
9783}
9784
9785#if TG3_VLAN_TAG_USED
9786static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9787{
9788 struct tg3 *tp = netdev_priv(dev);
9789
Michael Chan29315e82006-06-29 20:12:30 -07009790 if (netif_running(dev))
9791 tg3_netif_stop(tp);
9792
David S. Millerf47c11e2005-06-24 20:18:35 -07009793 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009794
9795 tp->vlgrp = grp;
9796
9797 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9798 __tg3_set_rx_mode(dev);
9799
Michael Chan29315e82006-06-29 20:12:30 -07009800 if (netif_running(dev))
9801 tg3_netif_start(tp);
Michael Chan46966542007-07-11 19:47:19 -07009802
9803 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009804}
Linus Torvalds1da177e2005-04-16 15:20:36 -07009805#endif
9806
David S. Miller15f98502005-05-18 22:49:26 -07009807static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9808{
9809 struct tg3 *tp = netdev_priv(dev);
9810
9811 memcpy(ec, &tp->coal, sizeof(*ec));
9812 return 0;
9813}
9814
Michael Chand244c892005-07-05 14:42:33 -07009815static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9816{
9817 struct tg3 *tp = netdev_priv(dev);
9818 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9819 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9820
9821 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9822 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9823 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9824 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9825 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9826 }
9827
9828 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9829 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9830 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9831 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9832 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9833 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9834 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9835 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9836 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9837 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9838 return -EINVAL;
9839
9840 /* No rx interrupts will be generated if both are zero */
9841 if ((ec->rx_coalesce_usecs == 0) &&
9842 (ec->rx_max_coalesced_frames == 0))
9843 return -EINVAL;
9844
9845 /* No tx interrupts will be generated if both are zero */
9846 if ((ec->tx_coalesce_usecs == 0) &&
9847 (ec->tx_max_coalesced_frames == 0))
9848 return -EINVAL;
9849
9850 /* Only copy relevant parameters, ignore all others. */
9851 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9852 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9853 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9854 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9855 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9856 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9857 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9858 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9859 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9860
9861 if (netif_running(dev)) {
9862 tg3_full_lock(tp, 0);
9863 __tg3_set_coalesce(tp, &tp->coal);
9864 tg3_full_unlock(tp);
9865 }
9866 return 0;
9867}
9868
Jeff Garzik7282d492006-09-13 14:30:00 -04009869static const struct ethtool_ops tg3_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009870 .get_settings = tg3_get_settings,
9871 .set_settings = tg3_set_settings,
9872 .get_drvinfo = tg3_get_drvinfo,
9873 .get_regs_len = tg3_get_regs_len,
9874 .get_regs = tg3_get_regs,
9875 .get_wol = tg3_get_wol,
9876 .set_wol = tg3_set_wol,
9877 .get_msglevel = tg3_get_msglevel,
9878 .set_msglevel = tg3_set_msglevel,
9879 .nway_reset = tg3_nway_reset,
9880 .get_link = ethtool_op_get_link,
9881 .get_eeprom_len = tg3_get_eeprom_len,
9882 .get_eeprom = tg3_get_eeprom,
9883 .set_eeprom = tg3_set_eeprom,
9884 .get_ringparam = tg3_get_ringparam,
9885 .set_ringparam = tg3_set_ringparam,
9886 .get_pauseparam = tg3_get_pauseparam,
9887 .set_pauseparam = tg3_set_pauseparam,
9888 .get_rx_csum = tg3_get_rx_csum,
9889 .set_rx_csum = tg3_set_rx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009890 .set_tx_csum = tg3_set_tx_csum,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009891 .set_sg = ethtool_op_set_sg,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009892 .set_tso = tg3_set_tso,
Michael Chan4cafd3f2005-05-29 14:56:34 -07009893 .self_test = tg3_self_test,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009894 .get_strings = tg3_get_strings,
Michael Chan4009a932005-09-05 17:52:54 -07009895 .phys_id = tg3_phys_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009896 .get_ethtool_stats = tg3_get_ethtool_stats,
David S. Miller15f98502005-05-18 22:49:26 -07009897 .get_coalesce = tg3_get_coalesce,
Michael Chand244c892005-07-05 14:42:33 -07009898 .set_coalesce = tg3_set_coalesce,
Jeff Garzikb9f2c042007-10-03 18:07:32 -07009899 .get_sset_count = tg3_get_sset_count,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009900};
9901
9902static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9903{
Michael Chan1b277772006-03-20 22:27:48 -08009904 u32 cursize, val, magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009905
9906 tp->nvram_size = EEPROM_CHIP_SIZE;
9907
Michael Chan18201802006-03-20 22:29:15 -08009908 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009909 return;
9910
Michael Chanb16250e2006-09-27 16:10:14 -07009911 if ((magic != TG3_EEPROM_MAGIC) &&
9912 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9913 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009914 return;
9915
9916 /*
9917 * Size the chip by reading offsets at increasing powers of two.
9918 * When we encounter our validation signature, we know the addressing
9919 * has wrapped around, and thus have our chip size.
9920 */
Michael Chan1b277772006-03-20 22:27:48 -08009921 cursize = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009922
9923 while (cursize < tp->nvram_size) {
Michael Chan18201802006-03-20 22:29:15 -08009924 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009925 return;
9926
Michael Chan18201802006-03-20 22:29:15 -08009927 if (val == magic)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009928 break;
9929
9930 cursize <<= 1;
9931 }
9932
9933 tp->nvram_size = cursize;
9934}
Jeff Garzik6aa20a22006-09-13 13:24:59 -04009935
Linus Torvalds1da177e2005-04-16 15:20:36 -07009936static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9937{
9938 u32 val;
9939
Michael Chan18201802006-03-20 22:29:15 -08009940 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
Michael Chan1b277772006-03-20 22:27:48 -08009941 return;
9942
9943 /* Selfboot format */
Michael Chan18201802006-03-20 22:29:15 -08009944 if (val != TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -08009945 tg3_get_eeprom_size(tp);
9946 return;
9947 }
9948
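	/* Legacy images store the NVRAM size, in kilobytes, in the upper 16
	 * bits of the word at offset 0xf0; fall back to 512KB if that word
	 * is zero or unreadable.
	 */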
Linus Torvalds1da177e2005-04-16 15:20:36 -07009949 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9950 if (val != 0) {
9951 tp->nvram_size = (val >> 16) * 1024;
9952 return;
9953 }
9954 }
Matt Carlsonfd1122a2008-05-02 16:48:36 -07009955 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009956}
9957
9958static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9959{
9960 u32 nvcfg1;
9961
9962 nvcfg1 = tr32(NVRAM_CFG1);
9963 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9964 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9965 }
9966 else {
9967 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9968 tw32(NVRAM_CFG1, nvcfg1);
9969 }
9970
Michael Chan4c987482005-09-05 17:52:38 -07009971 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
Michael Chana4e2b342005-10-26 15:46:52 -07009972 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009973 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9974 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9975 tp->nvram_jedecnum = JEDEC_ATMEL;
9976 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9977 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9978 break;
9979 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9980 tp->nvram_jedecnum = JEDEC_ATMEL;
9981 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9982 break;
9983 case FLASH_VENDOR_ATMEL_EEPROM:
9984 tp->nvram_jedecnum = JEDEC_ATMEL;
9985 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9986 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9987 break;
9988 case FLASH_VENDOR_ST:
9989 tp->nvram_jedecnum = JEDEC_ST;
9990 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9991 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9992 break;
9993 case FLASH_VENDOR_SAIFUN:
9994 tp->nvram_jedecnum = JEDEC_SAIFUN;
9995 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9996 break;
9997 case FLASH_VENDOR_SST_SMALL:
9998 case FLASH_VENDOR_SST_LARGE:
9999 tp->nvram_jedecnum = JEDEC_SST;
10000 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10001 break;
10002 }
10003 }
10004 else {
10005 tp->nvram_jedecnum = JEDEC_ATMEL;
10006 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10007 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10008 }
10009}
10010
Michael Chan361b4ac2005-04-21 17:11:21 -070010011static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10012{
10013 u32 nvcfg1;
10014
10015 nvcfg1 = tr32(NVRAM_CFG1);
10016
Michael Chane6af3012005-04-21 17:12:05 -070010017 /* NVRAM protection for TPM */
10018 if (nvcfg1 & (1 << 27))
10019 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10020
Michael Chan361b4ac2005-04-21 17:11:21 -070010021 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10022 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10023 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10024 tp->nvram_jedecnum = JEDEC_ATMEL;
10025 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10026 break;
10027 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10028 tp->nvram_jedecnum = JEDEC_ATMEL;
10029 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10030 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10031 break;
10032 case FLASH_5752VENDOR_ST_M45PE10:
10033 case FLASH_5752VENDOR_ST_M45PE20:
10034 case FLASH_5752VENDOR_ST_M45PE40:
10035 tp->nvram_jedecnum = JEDEC_ST;
10036 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10037 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10038 break;
10039 }
10040
10041 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10042 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10043 case FLASH_5752PAGE_SIZE_256:
10044 tp->nvram_pagesize = 256;
10045 break;
10046 case FLASH_5752PAGE_SIZE_512:
10047 tp->nvram_pagesize = 512;
10048 break;
10049 case FLASH_5752PAGE_SIZE_1K:
10050 tp->nvram_pagesize = 1024;
10051 break;
10052 case FLASH_5752PAGE_SIZE_2K:
10053 tp->nvram_pagesize = 2048;
10054 break;
10055 case FLASH_5752PAGE_SIZE_4K:
10056 tp->nvram_pagesize = 4096;
10057 break;
10058 case FLASH_5752PAGE_SIZE_264:
10059 tp->nvram_pagesize = 264;
10060 break;
10061 }
10062 }
10063 else {
10064 /* For eeprom, set pagesize to maximum eeprom size */
10065 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10066
10067 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10068 tw32(NVRAM_CFG1, nvcfg1);
10069 }
10070}
10071
Michael Chand3c7b882006-03-23 01:28:25 -080010072static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10073{
Matt Carlson989a9d22007-05-05 11:51:05 -070010074 u32 nvcfg1, protect = 0;
Michael Chand3c7b882006-03-23 01:28:25 -080010075
10076 nvcfg1 = tr32(NVRAM_CFG1);
10077
10078 /* NVRAM protection for TPM */
Matt Carlson989a9d22007-05-05 11:51:05 -070010079 if (nvcfg1 & (1 << 27)) {
Michael Chand3c7b882006-03-23 01:28:25 -080010080 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
Matt Carlson989a9d22007-05-05 11:51:05 -070010081 protect = 1;
10082 }
Michael Chand3c7b882006-03-23 01:28:25 -080010083
Matt Carlson989a9d22007-05-05 11:51:05 -070010084 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10085 switch (nvcfg1) {
Michael Chand3c7b882006-03-23 01:28:25 -080010086 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10087 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10088 case FLASH_5755VENDOR_ATMEL_FLASH_3:
Matt Carlson70b65a22007-07-11 19:48:50 -070010089 case FLASH_5755VENDOR_ATMEL_FLASH_5:
Michael Chand3c7b882006-03-23 01:28:25 -080010090 tp->nvram_jedecnum = JEDEC_ATMEL;
10091 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10092 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10093 tp->nvram_pagesize = 264;
Matt Carlson70b65a22007-07-11 19:48:50 -070010094 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10095 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010096 tp->nvram_size = (protect ? 0x3e200 :
10097 TG3_NVRAM_SIZE_512KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010098 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010099 tp->nvram_size = (protect ? 0x1f200 :
10100 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010101 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010102 tp->nvram_size = (protect ? 0x1f200 :
10103 TG3_NVRAM_SIZE_128KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010104 break;
10105 case FLASH_5752VENDOR_ST_M45PE10:
10106 case FLASH_5752VENDOR_ST_M45PE20:
10107 case FLASH_5752VENDOR_ST_M45PE40:
10108 tp->nvram_jedecnum = JEDEC_ST;
10109 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10110 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10111 tp->nvram_pagesize = 256;
Matt Carlson989a9d22007-05-05 11:51:05 -070010112 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010113 tp->nvram_size = (protect ?
10114 TG3_NVRAM_SIZE_64KB :
10115 TG3_NVRAM_SIZE_128KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010116 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010117 tp->nvram_size = (protect ?
10118 TG3_NVRAM_SIZE_64KB :
10119 TG3_NVRAM_SIZE_256KB);
Matt Carlson989a9d22007-05-05 11:51:05 -070010120 else
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010121 tp->nvram_size = (protect ?
10122 TG3_NVRAM_SIZE_128KB :
10123 TG3_NVRAM_SIZE_512KB);
Michael Chand3c7b882006-03-23 01:28:25 -080010124 break;
10125 }
10126}
10127
Michael Chan1b277772006-03-20 22:27:48 -080010128static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10129{
10130 u32 nvcfg1;
10131
10132 nvcfg1 = tr32(NVRAM_CFG1);
10133
10134 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10135 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10136 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10137 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10138 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10139 tp->nvram_jedecnum = JEDEC_ATMEL;
10140 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10141 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10142
10143 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10144 tw32(NVRAM_CFG1, nvcfg1);
10145 break;
10146 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10147 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10148 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10149 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10150 tp->nvram_jedecnum = JEDEC_ATMEL;
10151 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10152 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10153 tp->nvram_pagesize = 264;
10154 break;
10155 case FLASH_5752VENDOR_ST_M45PE10:
10156 case FLASH_5752VENDOR_ST_M45PE20:
10157 case FLASH_5752VENDOR_ST_M45PE40:
10158 tp->nvram_jedecnum = JEDEC_ST;
10159 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10160 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10161 tp->nvram_pagesize = 256;
10162 break;
10163 }
10164}
10165
Matt Carlson6b91fa02007-10-10 18:01:09 -070010166static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10167{
10168 u32 nvcfg1, protect = 0;
10169
10170 nvcfg1 = tr32(NVRAM_CFG1);
10171
10172 /* NVRAM protection for TPM */
10173 if (nvcfg1 & (1 << 27)) {
10174 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10175 protect = 1;
10176 }
10177
10178 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10179 switch (nvcfg1) {
10180 case FLASH_5761VENDOR_ATMEL_ADB021D:
10181 case FLASH_5761VENDOR_ATMEL_ADB041D:
10182 case FLASH_5761VENDOR_ATMEL_ADB081D:
10183 case FLASH_5761VENDOR_ATMEL_ADB161D:
10184 case FLASH_5761VENDOR_ATMEL_MDB021D:
10185 case FLASH_5761VENDOR_ATMEL_MDB041D:
10186 case FLASH_5761VENDOR_ATMEL_MDB081D:
10187 case FLASH_5761VENDOR_ATMEL_MDB161D:
10188 tp->nvram_jedecnum = JEDEC_ATMEL;
10189 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10190 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10191 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10192 tp->nvram_pagesize = 256;
10193 break;
10194 case FLASH_5761VENDOR_ST_A_M45PE20:
10195 case FLASH_5761VENDOR_ST_A_M45PE40:
10196 case FLASH_5761VENDOR_ST_A_M45PE80:
10197 case FLASH_5761VENDOR_ST_A_M45PE16:
10198 case FLASH_5761VENDOR_ST_M_M45PE20:
10199 case FLASH_5761VENDOR_ST_M_M45PE40:
10200 case FLASH_5761VENDOR_ST_M_M45PE80:
10201 case FLASH_5761VENDOR_ST_M_M45PE16:
10202 tp->nvram_jedecnum = JEDEC_ST;
10203 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10204 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10205 tp->nvram_pagesize = 256;
10206 break;
10207 }
10208
10209 if (protect) {
10210 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10211 } else {
10212 switch (nvcfg1) {
10213 case FLASH_5761VENDOR_ATMEL_ADB161D:
10214 case FLASH_5761VENDOR_ATMEL_MDB161D:
10215 case FLASH_5761VENDOR_ST_A_M45PE16:
10216 case FLASH_5761VENDOR_ST_M_M45PE16:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010217 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010218 break;
10219 case FLASH_5761VENDOR_ATMEL_ADB081D:
10220 case FLASH_5761VENDOR_ATMEL_MDB081D:
10221 case FLASH_5761VENDOR_ST_A_M45PE80:
10222 case FLASH_5761VENDOR_ST_M_M45PE80:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010223 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010224 break;
10225 case FLASH_5761VENDOR_ATMEL_ADB041D:
10226 case FLASH_5761VENDOR_ATMEL_MDB041D:
10227 case FLASH_5761VENDOR_ST_A_M45PE40:
10228 case FLASH_5761VENDOR_ST_M_M45PE40:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010229 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010230 break;
10231 case FLASH_5761VENDOR_ATMEL_ADB021D:
10232 case FLASH_5761VENDOR_ATMEL_MDB021D:
10233 case FLASH_5761VENDOR_ST_A_M45PE20:
10234 case FLASH_5761VENDOR_ST_M_M45PE20:
Matt Carlsonfd1122a2008-05-02 16:48:36 -070010235 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
Matt Carlson6b91fa02007-10-10 18:01:09 -070010236 break;
10237 }
10238 }
10239}
10240
Michael Chanb5d37722006-09-27 16:06:21 -070010241static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10242{
10243 tp->nvram_jedecnum = JEDEC_ATMEL;
10244 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10245 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10246}
10247
Linus Torvalds1da177e2005-04-16 15:20:36 -070010248/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10249static void __devinit tg3_nvram_init(struct tg3 *tp)
10250{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010251 tw32_f(GRC_EEPROM_ADDR,
10252 (EEPROM_ADDR_FSM_RESET |
10253 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10254 EEPROM_ADDR_CLKPERD_SHIFT)));
10255
Michael Chan9d57f012006-12-07 00:23:25 -080010256 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010257
10258 /* Enable seeprom accesses. */
10259 tw32_f(GRC_LOCAL_CTRL,
10260 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10261 udelay(100);
10262
10263 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10264 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10265 tp->tg3_flags |= TG3_FLAG_NVRAM;
10266
Michael Chanec41c7d2006-01-17 02:40:55 -080010267 if (tg3_nvram_lock(tp)) {
10268 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10269 "tg3_nvram_init failed.\n", tp->dev->name);
10270 return;
10271 }
Michael Chane6af3012005-04-21 17:12:05 -070010272 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010273
Matt Carlson989a9d22007-05-05 11:51:05 -070010274 tp->nvram_size = 0;
10275
Michael Chan361b4ac2005-04-21 17:11:21 -070010276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10277 tg3_get_5752_nvram_info(tp);
Michael Chand3c7b882006-03-23 01:28:25 -080010278 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10279 tg3_get_5755_nvram_info(tp);
Matt Carlsond30cdd22007-10-07 23:28:35 -070010280 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10281 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
Michael Chan1b277772006-03-20 22:27:48 -080010282 tg3_get_5787_nvram_info(tp);
Matt Carlson6b91fa02007-10-10 18:01:09 -070010283 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10284 tg3_get_5761_nvram_info(tp);
Michael Chanb5d37722006-09-27 16:06:21 -070010285 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10286 tg3_get_5906_nvram_info(tp);
Michael Chan361b4ac2005-04-21 17:11:21 -070010287 else
10288 tg3_get_nvram_info(tp);
10289
Matt Carlson989a9d22007-05-05 11:51:05 -070010290 if (tp->nvram_size == 0)
10291 tg3_get_nvram_size(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010292
Michael Chane6af3012005-04-21 17:12:05 -070010293 tg3_disable_nvram_access(tp);
Michael Chan381291b2005-12-13 21:08:21 -080010294 tg3_nvram_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010295
10296 } else {
10297 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10298
10299 tg3_get_eeprom_size(tp);
10300 }
10301}
10302
10303static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10304 u32 offset, u32 *val)
10305{
10306 u32 tmp;
10307 int i;
10308
10309 if (offset > EEPROM_ADDR_ADDR_MASK ||
10310 (offset % 4) != 0)
10311 return -EINVAL;
10312
10313 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10314 EEPROM_ADDR_DEVID_MASK |
10315 EEPROM_ADDR_READ);
10316 tw32(GRC_EEPROM_ADDR,
10317 tmp |
10318 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10319 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10320 EEPROM_ADDR_ADDR_MASK) |
10321 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10322
Michael Chan9d57f012006-12-07 00:23:25 -080010323 for (i = 0; i < 1000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010324 tmp = tr32(GRC_EEPROM_ADDR);
10325
10326 if (tmp & EEPROM_ADDR_COMPLETE)
10327 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010328 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010329 }
10330 if (!(tmp & EEPROM_ADDR_COMPLETE))
10331 return -EBUSY;
10332
10333 *val = tr32(GRC_EEPROM_DATA);
10334 return 0;
10335}
10336
10337#define NVRAM_CMD_TIMEOUT 10000
10338
10339static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10340{
10341 int i;
10342
10343 tw32(NVRAM_CMD, nvram_cmd);
10344 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10345 udelay(10);
10346 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10347 udelay(10);
10348 break;
10349 }
10350 }
10351 if (i == NVRAM_CMD_TIMEOUT) {
10352 return -EBUSY;
10353 }
10354 return 0;
10355}
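
/* tg3_nvram_exec_cmd() above is a standard kick-and-poll sequence:
 * write the command, then poll the DONE bit for up to
 * NVRAM_CMD_TIMEOUT iterations of 10us each.  A generic sketch of the
 * same pattern, with a hypothetical reg_read() standing in for tr32():
 */
static int poll_for_done_bit(u32 (*reg_read)(u32 reg), u32 reg,
			     u32 done_bit, int max_polls, int delay_us)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		udelay(delay_us);
		if (reg_read(reg) & done_bit)
			return 0;	/* command completed */
	}
	return -EBUSY;			/* timed out */
}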
10356
Michael Chan18201802006-03-20 22:29:15 -080010357static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10358{
10359 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10360 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10361 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010362 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chan18201802006-03-20 22:29:15 -080010363 (tp->nvram_jedecnum == JEDEC_ATMEL))
10364
10365 addr = ((addr / tp->nvram_pagesize) <<
10366 ATMEL_AT45DB0X1B_PAGE_POS) +
10367 (addr % tp->nvram_pagesize);
10368
10369 return addr;
10370}
10371
Michael Chanc4e65752006-03-20 22:29:32 -080010372static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10373{
10374 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10375 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10376 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
Matt Carlson6b91fa02007-10-10 18:01:09 -070010377 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
Michael Chanc4e65752006-03-20 22:29:32 -080010378 (tp->nvram_jedecnum == JEDEC_ATMEL))
10379
10380 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10381 tp->nvram_pagesize) +
10382 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10383
10384 return addr;
10385}
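
/* A stand-alone illustration of the address translation done by
 * tg3_nvram_phys_addr() and tg3_nvram_logical_addr() above, assuming
 * the usual Atmel AT45DB0x1B geometry of 264-byte pages with the byte
 * offset in the low nine bits of the physical address (the SKETCH_*
 * constants are stand-ins for the ATMEL_AT45DB0X1B_* definitions).
 * Round-tripping logical offset 1000 gives page 3, byte 208, i.e.
 * physical 0x6d0, and back again.
 */
#define SKETCH_PAGE_SIZE	264	/* assumed ATMEL_AT45DB0X1B_PAGE_SIZE */
#define SKETCH_PAGE_POS		9	/* assumed ATMEL_AT45DB0X1B_PAGE_POS */

static u32 sketch_logical_to_phys(u32 addr)
{
	return ((addr / SKETCH_PAGE_SIZE) << SKETCH_PAGE_POS) +
	       (addr % SKETCH_PAGE_SIZE);
}

static u32 sketch_phys_to_logical(u32 addr)
{
	return ((addr >> SKETCH_PAGE_POS) * SKETCH_PAGE_SIZE) +
	       (addr & ((1 << SKETCH_PAGE_POS) - 1));
}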
10386
Linus Torvalds1da177e2005-04-16 15:20:36 -070010387static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10388{
10389 int ret;
10390
Linus Torvalds1da177e2005-04-16 15:20:36 -070010391 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10392 return tg3_nvram_read_using_eeprom(tp, offset, val);
10393
Michael Chan18201802006-03-20 22:29:15 -080010394 offset = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010395
10396 if (offset > NVRAM_ADDR_MSK)
10397 return -EINVAL;
10398
Michael Chanec41c7d2006-01-17 02:40:55 -080010399 ret = tg3_nvram_lock(tp);
10400 if (ret)
10401 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010402
Michael Chane6af3012005-04-21 17:12:05 -070010403 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010404
10405 tw32(NVRAM_ADDR, offset);
10406 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10407 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10408
10409 if (ret == 0)
10410 *val = swab32(tr32(NVRAM_RDDATA));
10411
Michael Chane6af3012005-04-21 17:12:05 -070010412 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010413
Michael Chan381291b2005-12-13 21:08:21 -080010414 tg3_nvram_unlock(tp);
10415
Linus Torvalds1da177e2005-04-16 15:20:36 -070010416 return ret;
10417}
10418
Al Virob9fc7dc2007-12-17 22:59:57 -080010419static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10420{
10421 u32 v;
10422 int res = tg3_nvram_read(tp, offset, &v);
10423 if (!res)
10424 *val = cpu_to_le32(v);
10425 return res;
10426}
10427
Michael Chan18201802006-03-20 22:29:15 -080010428static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10429{
10430 int err;
10431 u32 tmp;
10432
10433 err = tg3_nvram_read(tp, offset, &tmp);
10434 *val = swab32(tmp);
10435 return err;
10436}
10437
Linus Torvalds1da177e2005-04-16 15:20:36 -070010438static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10439 u32 offset, u32 len, u8 *buf)
10440{
10441 int i, j, rc = 0;
10442 u32 val;
10443
10444 for (i = 0; i < len; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010445 u32 addr;
10446 __le32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010447
10448 addr = offset + i;
10449
10450 memcpy(&data, buf + i, 4);
10451
Al Virob9fc7dc2007-12-17 22:59:57 -080010452 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010453
10454 val = tr32(GRC_EEPROM_ADDR);
10455 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10456
10457 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10458 EEPROM_ADDR_READ);
10459 tw32(GRC_EEPROM_ADDR, val |
10460 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10461 (addr & EEPROM_ADDR_ADDR_MASK) |
10462 EEPROM_ADDR_START |
10463 EEPROM_ADDR_WRITE);
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010464
Michael Chan9d57f012006-12-07 00:23:25 -080010465 for (j = 0; j < 1000; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010466 val = tr32(GRC_EEPROM_ADDR);
10467
10468 if (val & EEPROM_ADDR_COMPLETE)
10469 break;
Michael Chan9d57f012006-12-07 00:23:25 -080010470 msleep(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010471 }
10472 if (!(val & EEPROM_ADDR_COMPLETE)) {
10473 rc = -EBUSY;
10474 break;
10475 }
10476 }
10477
10478 return rc;
10479}
10480
10481/* offset and length are dword aligned */
10482static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10483 u8 *buf)
10484{
10485 int ret = 0;
10486 u32 pagesize = tp->nvram_pagesize;
10487 u32 pagemask = pagesize - 1;
10488 u32 nvram_cmd;
10489 u8 *tmp;
10490
10491 tmp = kmalloc(pagesize, GFP_KERNEL);
10492 if (tmp == NULL)
10493 return -ENOMEM;
10494
10495 while (len) {
10496 int j;
Michael Chane6af3012005-04-21 17:12:05 -070010497 u32 phy_addr, page_off, size;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010498
10499 phy_addr = offset & ~pagemask;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040010500
Linus Torvalds1da177e2005-04-16 15:20:36 -070010501 for (j = 0; j < pagesize; j += 4) {
Al Viro286e3102007-12-17 23:00:31 -080010502 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
Al Virob9fc7dc2007-12-17 22:59:57 -080010503 (__le32 *) (tmp + j))))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010504 break;
10505 }
10506 if (ret)
10507 break;
10508
10509 page_off = offset & pagemask;
10510 size = pagesize;
10511 if (len < size)
10512 size = len;
10513
10514 len -= size;
10515
10516 memcpy(tmp + page_off, buf, size);
10517
10518 offset = offset + (pagesize - page_off);
10519
Michael Chane6af3012005-04-21 17:12:05 -070010520 tg3_enable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010521
10522 /*
10523 * Before we can erase the flash page, we need
10524 * to issue a special "write enable" command.
10525 */
10526 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10527
10528 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10529 break;
10530
10531 /* Erase the target page */
10532 tw32(NVRAM_ADDR, phy_addr);
10533
10534 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10535 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10536
10537 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10538 break;
10539
10540 /* Issue another write enable to start the write. */
10541 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10542
10543 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10544 break;
10545
10546 for (j = 0; j < pagesize; j += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010547 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010548
Al Virob9fc7dc2007-12-17 22:59:57 -080010549 data = *((__be32 *) (tmp + j));
10550 /* swab32(le32_to_cpu(data)), actually */
10551 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010552
10553 tw32(NVRAM_ADDR, phy_addr + j);
10554
10555 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10556 NVRAM_CMD_WR;
10557
10558 if (j == 0)
10559 nvram_cmd |= NVRAM_CMD_FIRST;
10560 else if (j == (pagesize - 4))
10561 nvram_cmd |= NVRAM_CMD_LAST;
10562
10563 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10564 break;
10565 }
10566 if (ret)
10567 break;
10568 }
10569
10570 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10571 tg3_nvram_exec_cmd(tp, nvram_cmd);
10572
10573 kfree(tmp);
10574
10575 return ret;
10576}
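
/* The unbuffered write path above is a page-oriented read-modify-write:
 * for each flash page touched, read the whole page into a bounce
 * buffer, overlay the caller's bytes, issue a write-enable, erase the
 * page, issue another write-enable, and then program the page back one
 * word at a time with FIRST/LAST framing on the first and last words.
 * A condensed sketch of that framing decision (illustrative only):
 */
static u32 frame_page_word_cmd(u32 base_cmd, u32 j, u32 pagesize)
{
	u32 cmd = base_cmd;

	if (j == 0)
		cmd |= NVRAM_CMD_FIRST;		/* first word of the page */
	else if (j == pagesize - 4)
		cmd |= NVRAM_CMD_LAST;		/* last word of the page */
	return cmd;
}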
10577
10578/* offset and length are dword aligned */
10579static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10580 u8 *buf)
10581{
10582 int i, ret = 0;
10583
10584 for (i = 0; i < len; i += 4, offset += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080010585 u32 page_off, phy_addr, nvram_cmd;
10586 __be32 data;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010587
10588 memcpy(&data, buf + i, 4);
Al Virob9fc7dc2007-12-17 22:59:57 -080010589 tw32(NVRAM_WRDATA, be32_to_cpu(data));
Linus Torvalds1da177e2005-04-16 15:20:36 -070010590
10591 page_off = offset % tp->nvram_pagesize;
10592
Michael Chan18201802006-03-20 22:29:15 -080010593 phy_addr = tg3_nvram_phys_addr(tp, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010594
10595 tw32(NVRAM_ADDR, phy_addr);
10596
10597 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10598
10599 if ((page_off == 0) || (i == 0))
10600 nvram_cmd |= NVRAM_CMD_FIRST;
Michael Chanf6d9a252006-04-29 19:00:24 -070010601 if (page_off == (tp->nvram_pagesize - 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010602 nvram_cmd |= NVRAM_CMD_LAST;
10603
10604 if (i == (len - 4))
10605 nvram_cmd |= NVRAM_CMD_LAST;
10606
Michael Chan4c987482005-09-05 17:52:38 -070010607 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080010608 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
Michael Chan1b277772006-03-20 22:27:48 -080010609 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070010610 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070010611 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
Michael Chan4c987482005-09-05 17:52:38 -070010612 (tp->nvram_jedecnum == JEDEC_ST) &&
10613 (nvram_cmd & NVRAM_CMD_FIRST)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010614
10615 if ((ret = tg3_nvram_exec_cmd(tp,
10616 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10617 NVRAM_CMD_DONE)))
10618
10619 break;
10620 }
10621 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10622 /* We always do complete word writes to eeprom. */
10623 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10624 }
10625
10626 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10627 break;
10628 }
10629 return ret;
10630}
10631
10632/* offset and length are dword aligned */
10633static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10634{
10635 int ret;
10636
Linus Torvalds1da177e2005-04-16 15:20:36 -070010637 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070010638 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10639 ~GRC_LCLCTRL_GPIO_OUTPUT1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010640 udelay(40);
10641 }
10642
10643 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10644 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10645 }
10646 else {
10647 u32 grc_mode;
10648
Michael Chanec41c7d2006-01-17 02:40:55 -080010649 ret = tg3_nvram_lock(tp);
10650 if (ret)
10651 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010652
Michael Chane6af3012005-04-21 17:12:05 -070010653 tg3_enable_nvram_access(tp);
10654 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10655 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
Linus Torvalds1da177e2005-04-16 15:20:36 -070010656 tw32(NVRAM_WRITE1, 0x406);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010657
10658 grc_mode = tr32(GRC_MODE);
10659 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10660
10661 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10662 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10663
10664 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10665 buf);
10666 }
10667 else {
10668 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10669 buf);
10670 }
10671
10672 grc_mode = tr32(GRC_MODE);
10673 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10674
Michael Chane6af3012005-04-21 17:12:05 -070010675 tg3_disable_nvram_access(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010676 tg3_nvram_unlock(tp);
10677 }
10678
10679 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
Michael Chan314fba32005-04-21 17:07:04 -070010680 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010681 udelay(40);
10682 }
10683
10684 return ret;
10685}
10686
10687struct subsys_tbl_ent {
10688 u16 subsys_vendor, subsys_devid;
10689 u32 phy_id;
10690};
10691
10692static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10693 /* Broadcom boards. */
10694 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10695 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10696 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10697 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10698 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10699 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10700 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10701 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10702 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10703 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10704 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10705
10706 /* 3com boards. */
10707 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10708 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10709 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10710 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10711 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10712
10713 /* DELL boards. */
10714 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10715 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10716 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10717 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10718
10719 /* Compaq boards. */
10720 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10721 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10722 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10723 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10724 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10725
10726 /* IBM boards. */
10727 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10728};
10729
10730static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10731{
10732 int i;
10733
10734 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10735 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10736 tp->pdev->subsystem_vendor) &&
10737 (subsys_id_to_phy_id[i].subsys_devid ==
10738 tp->pdev->subsystem_device))
10739 return &subsys_id_to_phy_id[i];
10740 }
10741 return NULL;
10742}
10743
Michael Chan7d0c41e2005-04-21 17:06:20 -070010744static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010745{
Linus Torvalds1da177e2005-04-16 15:20:36 -070010746 u32 val;
Michael Chancaf636c72006-03-22 01:05:31 -080010747 u16 pmcsr;
10748
10749 /* On some early chips the SRAM cannot be accessed in D3hot state,
10750 * so we need to make sure we're in D0.
10751 */
10752 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10753 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10754 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10755 msleep(1);
Michael Chan7d0c41e2005-04-21 17:06:20 -070010756
10757 /* Make sure register accesses (indirect or otherwise)
10758 * will function correctly.
10759 */
10760 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10761 tp->misc_host_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010762
David S. Millerf49639e2006-06-09 11:58:36 -070010763 /* The memory arbiter has to be enabled in order for SRAM accesses
10764 * to succeed. Normally on powerup the tg3 chip firmware will make
10765 * sure it is enabled, but other entities such as system netboot
10766 * code might disable it.
10767 */
10768 val = tr32(MEMARB_MODE);
10769 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10770
Linus Torvalds1da177e2005-04-16 15:20:36 -070010771 tp->phy_id = PHY_ID_INVALID;
Michael Chan7d0c41e2005-04-21 17:06:20 -070010772 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10773
Gary Zambranoa85feb82007-05-05 11:52:19 -070010774 /* Assume an onboard device and WOL capable by default. */
10775 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
David S. Miller72b845e2006-03-14 14:11:48 -080010776
Michael Chanb5d37722006-09-27 16:06:21 -070010777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan9d26e212006-12-07 00:21:14 -080010778 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
Michael Chanb5d37722006-09-27 16:06:21 -070010779 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080010780 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10781 }
Matt Carlson0527ba32007-10-10 18:03:30 -070010782 val = tr32(VCPU_CFGSHDW);
10783 if (val & VCPU_CFGSHDW_ASPM_DBNC)
Matt Carlson8ed5d972007-05-07 00:25:49 -070010784 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
Matt Carlson0527ba32007-10-10 18:03:30 -070010785 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10786 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10787 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
Michael Chanb5d37722006-09-27 16:06:21 -070010788 return;
10789 }
10790
Linus Torvalds1da177e2005-04-16 15:20:36 -070010791 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10792 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10793 u32 nic_cfg, led_cfg;
Michael Chan7d0c41e2005-04-21 17:06:20 -070010794 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10795 int eeprom_phy_serdes = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010796
10797 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10798 tp->nic_sram_data_cfg = nic_cfg;
10799
10800 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10801 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10802 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10803 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10804 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10805 (ver > 0) && (ver < 0x100))
10806 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10807
Linus Torvalds1da177e2005-04-16 15:20:36 -070010808 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10809 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10810 eeprom_phy_serdes = 1;
10811
10812 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10813 if (nic_phy_id != 0) {
10814 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10815 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10816
10817 eeprom_phy_id = (id1 >> 16) << 10;
10818 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10819 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10820 } else
10821 eeprom_phy_id = 0;
10822
Michael Chan7d0c41e2005-04-21 17:06:20 -070010823 tp->phy_id = eeprom_phy_id;
Michael Chan747e8f82005-07-25 12:33:22 -070010824 if (eeprom_phy_serdes) {
Michael Chana4e2b342005-10-26 15:46:52 -070010825 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
Michael Chan747e8f82005-07-25 12:33:22 -070010826 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10827 else
10828 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10829 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070010830
John W. Linvillecbf46852005-04-21 17:01:29 -070010831 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010832 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10833 SHASTA_EXT_LED_MODE_MASK);
John W. Linvillecbf46852005-04-21 17:01:29 -070010834 else
Linus Torvalds1da177e2005-04-16 15:20:36 -070010835 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10836
10837 switch (led_cfg) {
10838 default:
10839 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10840 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10841 break;
10842
10843 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10844 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10845 break;
10846
10847 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10848 tp->led_ctrl = LED_CTRL_MODE_MAC;
Michael Chan9ba27792005-06-06 15:16:20 -070010849
10850 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10851 * read on some older 5700/5701 bootcode.
10852 */
10853 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10854 ASIC_REV_5700 ||
10855 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10856 ASIC_REV_5701)
10857 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10858
Linus Torvalds1da177e2005-04-16 15:20:36 -070010859 break;
10860
10861 case SHASTA_EXT_LED_SHARED:
10862 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10863 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10864 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10865 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10866 LED_CTRL_MODE_PHY_2);
10867 break;
10868
10869 case SHASTA_EXT_LED_MAC:
10870 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10871 break;
10872
10873 case SHASTA_EXT_LED_COMBO:
10874 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10875 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10876 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10877 LED_CTRL_MODE_PHY_2);
10878 break;
10879
10880 }
10881
10882 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10884 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10885 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10886
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010887 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10888 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
Matt Carlson5f608912007-11-12 21:17:07 -080010889
Michael Chan9d26e212006-12-07 00:21:14 -080010890 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010891 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080010892 if ((tp->pdev->subsystem_vendor ==
10893 PCI_VENDOR_ID_ARIMA) &&
10894 (tp->pdev->subsystem_device == 0x205a ||
10895 tp->pdev->subsystem_device == 0x2063))
10896 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10897 } else {
David S. Millerf49639e2006-06-09 11:58:36 -070010898 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
Michael Chan9d26e212006-12-07 00:21:14 -080010899 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10900 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010901
10902 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10903 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
John W. Linvillecbf46852005-04-21 17:01:29 -070010904 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010905 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10906 }
Matt Carlson0d3031d2007-10-10 18:02:43 -070010907 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10908 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
Gary Zambranoa85feb82007-05-05 11:52:19 -070010909 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10910 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10911 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010912
Matt Carlson0527ba32007-10-10 18:03:30 -070010913 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10914 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10915 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10916
Linus Torvalds1da177e2005-04-16 15:20:36 -070010917 if (cfg2 & (1 << 17))
10918 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10919
10920 /* serdes signal pre-emphasis in register 0x590 set by */
10921 /* bootcode if bit 18 is set */
10922 if (cfg2 & (1 << 18))
10923 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
Matt Carlson8ed5d972007-05-07 00:25:49 -070010924
10925 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10926 u32 cfg3;
10927
10928 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10929 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10930 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10931 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010932 }
Michael Chan7d0c41e2005-04-21 17:06:20 -070010933}
10934
Matt Carlsonb2a5c192008-04-03 21:44:44 -070010935static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10936{
10937 int i;
10938 u32 val;
10939
10940 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10941 tw32(OTP_CTRL, cmd);
10942
10943 /* Wait for up to 1 ms for command to execute. */
10944 for (i = 0; i < 100; i++) {
10945 val = tr32(OTP_STATUS);
10946 if (val & OTP_STATUS_CMD_DONE)
10947 break;
10948 udelay(10);
10949 }
10950
10951 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10952}
10953
10954/* Read the gphy configuration from the OTP region of the chip. The gphy
10955 * configuration is a 32-bit value that straddles the alignment boundary.
10956 * We do two 32-bit reads and then shift and merge the results.
10957 */
10958static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
10959{
10960 u32 bhalf_otp, thalf_otp;
10961
10962 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
10963
10964 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
10965 return 0;
10966
10967 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
10968
10969 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10970 return 0;
10971
10972 thalf_otp = tr32(OTP_READ_DATA);
10973
10974 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
10975
10976 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10977 return 0;
10978
10979 bhalf_otp = tr32(OTP_READ_DATA);
10980
10981 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
10982}
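
/* A stand-alone sketch of the shift-and-merge done by
 * tg3_read_otp_phycfg() above: the 32-bit gphy value straddles a word
 * boundary, so its upper 16 bits sit in the low half of the first OTP
 * word read and its lower 16 bits in the high half of the second.
 * For example, words 0xaaaa1234 and 0x5678bbbb merge to 0x12345678.
 */
static u32 sketch_merge_otp_halves(u32 first_word, u32 second_word)
{
	return ((first_word & 0x0000ffff) << 16) | (second_word >> 16);
}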
10983
Michael Chan7d0c41e2005-04-21 17:06:20 -070010984static int __devinit tg3_phy_probe(struct tg3 *tp)
10985{
10986 u32 hw_phy_id_1, hw_phy_id_2;
10987 u32 hw_phy_id, hw_phy_id_masked;
10988 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010989
10990 /* Reading the PHY ID register can conflict with ASF
10991 * firmware access to the PHY hardware.
10992 */
10993 err = 0;
Matt Carlson0d3031d2007-10-10 18:02:43 -070010994 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10995 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010996 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10997 } else {
10998 /* Now read the physical PHY_ID from the chip and verify
10999 * that it is sane. If it doesn't look good, we fall back
11000 * to the PHY_ID found in the eeprom area and, failing
11001 * that, the hard-coded subsystem ID table.
11002 */
11003 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11004 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11005
11006 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11007 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11008 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11009
11010 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11011 }
11012
11013 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11014 tp->phy_id = hw_phy_id;
11015 if (hw_phy_id_masked == PHY_ID_BCM8002)
11016 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
Michael Chanda6b2d02005-08-19 12:54:29 -070011017 else
11018 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011019 } else {
Michael Chan7d0c41e2005-04-21 17:06:20 -070011020 if (tp->phy_id != PHY_ID_INVALID) {
11021 /* Do nothing, phy ID already set up in
11022 * tg3_get_eeprom_hw_cfg().
11023 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011024 } else {
11025 struct subsys_tbl_ent *p;
11026
11027 /* No eeprom signature? Try the hardcoded
11028 * subsys device table.
11029 */
11030 p = lookup_by_subsys(tp);
11031 if (!p)
11032 return -ENODEV;
11033
11034 tp->phy_id = p->phy_id;
11035 if (!tp->phy_id ||
11036 tp->phy_id == PHY_ID_BCM8002)
11037 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11038 }
11039 }
11040
Michael Chan747e8f82005-07-25 12:33:22 -070011041 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
Matt Carlson0d3031d2007-10-10 18:02:43 -070011042 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011043 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
Michael Chan3600d912006-12-07 00:21:48 -080011044 u32 bmsr, adv_reg, tg3_ctrl, mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011045
11046 tg3_readphy(tp, MII_BMSR, &bmsr);
11047 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11048 (bmsr & BMSR_LSTATUS))
11049 goto skip_phy_reset;
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011050
Linus Torvalds1da177e2005-04-16 15:20:36 -070011051 err = tg3_phy_reset(tp);
11052 if (err)
11053 return err;
11054
11055 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11056 ADVERTISE_100HALF | ADVERTISE_100FULL |
11057 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11058 tg3_ctrl = 0;
11059 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11060 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11061 MII_TG3_CTRL_ADV_1000_FULL);
11062 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11063 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11064 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11065 MII_TG3_CTRL_ENABLE_AS_MASTER);
11066 }
11067
Michael Chan3600d912006-12-07 00:21:48 -080011068 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11069 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11070 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11071 if (!tg3_copper_is_advertising_all(tp, mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011072 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11073
11074 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11075 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11076
11077 tg3_writephy(tp, MII_BMCR,
11078 BMCR_ANENABLE | BMCR_ANRESTART);
11079 }
11080 tg3_phy_set_wirespeed(tp);
11081
11082 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11083 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11084 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11085 }
11086
11087skip_phy_reset:
11088 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11089 err = tg3_init_5401phy_dsp(tp);
11090 if (err)
11091 return err;
11092 }
11093
11094 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11095 err = tg3_init_5401phy_dsp(tp);
11096 }
11097
Michael Chan747e8f82005-07-25 12:33:22 -070011098 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011099 tp->link_config.advertising =
11100 (ADVERTISED_1000baseT_Half |
11101 ADVERTISED_1000baseT_Full |
11102 ADVERTISED_Autoneg |
11103 ADVERTISED_FIBRE);
11104 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11105 tp->link_config.advertising &=
11106 ~(ADVERTISED_1000baseT_Half |
11107 ADVERTISED_1000baseT_Full);
11108
11109 return err;
11110}
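
/* tg3_phy_probe() above folds the two 16-bit MII ID registers into the
 * driver's single 32-bit phy_id: PHYSID1 lands in bits 10-25, the top
 * six bits of PHYSID2 in bits 26-31, and the low ten bits of PHYSID2
 * (model and revision) in bits 0-9.  A stand-alone sketch of that
 * packing (the function name is illustrative only):
 */
static u32 sketch_pack_phy_id(u32 physid1, u32 physid2)
{
	u32 id;

	id  = (physid1 & 0xffff) << 10;
	id |= (physid2 & 0xfc00) << 16;
	id |= (physid2 & 0x03ff) << 0;
	return id;
}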
11111
11112static void __devinit tg3_read_partno(struct tg3 *tp)
11113{
11114 unsigned char vpd_data[256];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011115 unsigned int i;
Michael Chan1b277772006-03-20 22:27:48 -080011116 u32 magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011117
Michael Chan18201802006-03-20 22:29:15 -080011118 if (tg3_nvram_read_swab(tp, 0x0, &magic))
David S. Millerf49639e2006-06-09 11:58:36 -070011119 goto out_not_found;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011120
Michael Chan18201802006-03-20 22:29:15 -080011121 if (magic == TG3_EEPROM_MAGIC) {
Michael Chan1b277772006-03-20 22:27:48 -080011122 for (i = 0; i < 256; i += 4) {
11123 u32 tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011124
Michael Chan1b277772006-03-20 22:27:48 -080011125 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11126 goto out_not_found;
11127
11128 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11129 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11130 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11131 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11132 }
11133 } else {
11134 int vpd_cap;
11135
11136 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11137 for (i = 0; i < 256; i += 4) {
11138 u32 tmp, j = 0;
Al Virob9fc7dc2007-12-17 22:59:57 -080011139 __le32 v;
Michael Chan1b277772006-03-20 22:27:48 -080011140 u16 tmp16;
11141
11142 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11143 i);
11144 while (j++ < 100) {
11145 pci_read_config_word(tp->pdev, vpd_cap +
11146 PCI_VPD_ADDR, &tmp16);
11147 if (tmp16 & 0x8000)
11148 break;
11149 msleep(1);
11150 }
David S. Millerf49639e2006-06-09 11:58:36 -070011151 if (!(tmp16 & 0x8000))
11152 goto out_not_found;
11153
Michael Chan1b277772006-03-20 22:27:48 -080011154 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11155 &tmp);
Al Virob9fc7dc2007-12-17 22:59:57 -080011156 v = cpu_to_le32(tmp);
11157 memcpy(&vpd_data[i], &v, 4);
Michael Chan1b277772006-03-20 22:27:48 -080011158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011159 }
11160
11161 /* Now parse and find the part number. */
Michael Chanaf2c6a42006-11-07 14:57:51 -080011162 for (i = 0; i < 254; ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011163 unsigned char val = vpd_data[i];
Michael Chanaf2c6a42006-11-07 14:57:51 -080011164 unsigned int block_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011165
11166 if (val == 0x82 || val == 0x91) {
11167 i = (i + 3 +
11168 (vpd_data[i + 1] +
11169 (vpd_data[i + 2] << 8)));
11170 continue;
11171 }
11172
11173 if (val != 0x90)
11174 goto out_not_found;
11175
11176 block_end = (i + 3 +
11177 (vpd_data[i + 1] +
11178 (vpd_data[i + 2] << 8)));
11179 i += 3;
Michael Chanaf2c6a42006-11-07 14:57:51 -080011180
11181 if (block_end > 256)
11182 goto out_not_found;
11183
11184 while (i < (block_end - 2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011185 if (vpd_data[i + 0] == 'P' &&
11186 vpd_data[i + 1] == 'N') {
11187 int partno_len = vpd_data[i + 2];
11188
Michael Chanaf2c6a42006-11-07 14:57:51 -080011189 i += 3;
11190 if (partno_len > 24 || (partno_len + i) > 256)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011191 goto out_not_found;
11192
11193 memcpy(tp->board_part_number,
Michael Chanaf2c6a42006-11-07 14:57:51 -080011194 &vpd_data[i], partno_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011195
11196 /* Success. */
11197 return;
11198 }
Michael Chanaf2c6a42006-11-07 14:57:51 -080011199 i += 3 + vpd_data[i + 2];
Linus Torvalds1da177e2005-04-16 15:20:36 -070011200 }
11201
11202 /* Part number not found. */
11203 goto out_not_found;
11204 }
11205
11206out_not_found:
Michael Chanb5d37722006-09-27 16:06:21 -070011207 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11208 strcpy(tp->board_part_number, "BCM95906");
11209 else
11210 strcpy(tp->board_part_number, "none");
Linus Torvalds1da177e2005-04-16 15:20:36 -070011211}
11212
Matt Carlson9c8a6202007-10-21 16:16:08 -070011213static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11214{
11215 u32 val;
11216
11217 if (tg3_nvram_read_swab(tp, offset, &val) ||
11218 (val & 0xfc000000) != 0x0c000000 ||
11219 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11220 val != 0)
11221 return 0;
11222
11223 return 1;
11224}
11225
Michael Chanc4e65752006-03-20 22:29:32 -080011226static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11227{
11228 u32 val, offset, start;
Matt Carlson9c8a6202007-10-21 16:16:08 -070011229 u32 ver_offset;
11230 int i, bcnt;
Michael Chanc4e65752006-03-20 22:29:32 -080011231
11232 if (tg3_nvram_read_swab(tp, 0, &val))
11233 return;
11234
11235 if (val != TG3_EEPROM_MAGIC)
11236 return;
11237
11238 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11239 tg3_nvram_read_swab(tp, 0x4, &start))
11240 return;
11241
11242 offset = tg3_nvram_logical_addr(tp, offset);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011243
11244 if (!tg3_fw_img_is_valid(tp, offset) ||
11245 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
Michael Chanc4e65752006-03-20 22:29:32 -080011246 return;
11247
Matt Carlson9c8a6202007-10-21 16:16:08 -070011248 offset = offset + ver_offset - start;
11249 for (i = 0; i < 16; i += 4) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011250 __le32 v;
11251 if (tg3_nvram_read_le(tp, offset + i, &v))
Michael Chanc4e65752006-03-20 22:29:32 -080011252 return;
11253
Al Virob9fc7dc2007-12-17 22:59:57 -080011254 memcpy(tp->fw_ver + i, &v, 4);
Michael Chanc4e65752006-03-20 22:29:32 -080011255 }
Matt Carlson9c8a6202007-10-21 16:16:08 -070011256
11257 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
Matt Carlson84af67f2007-11-12 21:08:59 -080011258 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011259 return;
11260
11261 for (offset = TG3_NVM_DIR_START;
11262 offset < TG3_NVM_DIR_END;
11263 offset += TG3_NVM_DIRENT_SIZE) {
11264 if (tg3_nvram_read_swab(tp, offset, &val))
11265 return;
11266
11267 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11268 break;
11269 }
11270
11271 if (offset == TG3_NVM_DIR_END)
11272 return;
11273
11274 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11275 start = 0x08000000;
11276 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11277 return;
11278
11279 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11280 !tg3_fw_img_is_valid(tp, offset) ||
11281 tg3_nvram_read_swab(tp, offset + 8, &val))
11282 return;
11283
11284 offset += val - start;
11285
11286 bcnt = strlen(tp->fw_ver);
11287
11288 tp->fw_ver[bcnt++] = ',';
11289 tp->fw_ver[bcnt++] = ' ';
11290
11291 for (i = 0; i < 4; i++) {
Al Virob9fc7dc2007-12-17 22:59:57 -080011292 __le32 v;
11293 if (tg3_nvram_read_le(tp, offset, &v))
Matt Carlson9c8a6202007-10-21 16:16:08 -070011294 return;
11295
Al Virob9fc7dc2007-12-17 22:59:57 -080011296 offset += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011297
Al Virob9fc7dc2007-12-17 22:59:57 -080011298 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11299 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011300 break;
11301 }
11302
Al Virob9fc7dc2007-12-17 22:59:57 -080011303 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11304 bcnt += sizeof(v);
Matt Carlson9c8a6202007-10-21 16:16:08 -070011305 }
11306
11307 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
Michael Chanc4e65752006-03-20 22:29:32 -080011308}
11309
Michael Chan7544b092007-05-05 13:08:32 -070011310static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11311
Linus Torvalds1da177e2005-04-16 15:20:36 -070011312static int __devinit tg3_get_invariants(struct tg3 *tp)
11313{
11314 static struct pci_device_id write_reorder_chipsets[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011315 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11316 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
John W. Linvillec165b002006-07-08 13:28:53 -070011317 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11318 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
Michael Chan399de502005-10-03 14:02:39 -070011319 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11320 PCI_DEVICE_ID_VIA_8385_0) },
Linus Torvalds1da177e2005-04-16 15:20:36 -070011321 { },
11322 };
11323 u32 misc_ctrl_reg;
11324 u32 cacheline_sz_reg;
11325 u32 pci_state_reg, grc_misc_cfg;
11326 u32 val;
11327 u16 pci_cmd;
Michael Chanc7835a72006-11-15 21:14:42 -080011328 int err, pcie_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011329
Linus Torvalds1da177e2005-04-16 15:20:36 -070011330 /* Force memory write invalidate off. If we leave it on,
11331 * then on 5700_BX chips we have to enable a workaround.
11332 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11333 * to match the cacheline size. The Broadcom driver has this
11334 * workaround but turns MWI off all the time, so it never uses
11335 * it. This seems to suggest that the workaround is insufficient.
11336 */
11337 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11338 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11339 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11340
11341 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11342 * has the register indirect write enable bit set before
11343 * we try to access any of the MMIO registers. It is also
11344 * critical that the PCI-X hw workaround situation is decided
11345 * before that as well.
11346 */
11347 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11348 &misc_ctrl_reg);
11349
11350 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11351 MISC_HOST_CTRL_CHIPREV_SHIFT);
Matt Carlson795d01c2007-10-07 23:28:17 -070011352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11353 u32 prod_id_asic_rev;
11354
11355 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11356 &prod_id_asic_rev);
11357 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11358 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011359
Michael Chanff645be2005-04-21 17:09:53 -070011360 /* Wrong chip ID in 5752 A0. This code can be removed later
11361 * as A0 is not in production.
11362 */
11363 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11364 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11365
Michael Chan68929142005-08-09 20:17:14 -070011366 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11367 * we need to disable memory and use config. cycles
11368 * only to access all registers. The 5702/03 chips
11369 * can mistakenly decode the special cycles from the
11370 * ICH chipsets as memory write cycles, causing corruption
11371 * of register and memory space. Only certain ICH bridges
11372 * will drive special cycles with non-zero data during the
11373 * address phase which can fall within the 5703's address
11374 * range. This is not an ICH bug as the PCI spec allows
11375 * non-zero address during special cycles. However, only
11376 * these ICH bridges are known to drive non-zero addresses
11377 * during special cycles.
11378 *
11379 * Since special cycles do not cross PCI bridges, we only
11380 * enable this workaround if the 5703 is on the secondary
11381 * bus of these ICH bridges.
11382 */
11383 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11384 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11385 static struct tg3_dev_id {
11386 u32 vendor;
11387 u32 device;
11388 u32 rev;
11389 } ich_chipsets[] = {
11390 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11391 PCI_ANY_ID },
11392 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11393 PCI_ANY_ID },
11394 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11395 0xa },
11396 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11397 PCI_ANY_ID },
11398 { },
11399 };
11400 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11401 struct pci_dev *bridge = NULL;
11402
11403 while (pci_id->vendor != 0) {
11404 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11405 bridge);
11406 if (!bridge) {
11407 pci_id++;
11408 continue;
11409 }
11410 if (pci_id->rev != PCI_ANY_ID) {
Auke Kok44c10132007-06-08 15:46:36 -070011411 if (bridge->revision > pci_id->rev)
Michael Chan68929142005-08-09 20:17:14 -070011412 continue;
11413 }
11414 if (bridge->subordinate &&
11415 (bridge->subordinate->number ==
11416 tp->pdev->bus->number)) {
11417
11418 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11419 pci_dev_put(bridge);
11420 break;
11421 }
11422 }
11423 }
11424
Matt Carlson41588ba2008-04-19 18:12:33 -070011425 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11426 static struct tg3_dev_id {
11427 u32 vendor;
11428 u32 device;
11429 } bridge_chipsets[] = {
11430 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11431 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11432 { },
11433 };
11434 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11435 struct pci_dev *bridge = NULL;
11436
11437 while (pci_id->vendor != 0) {
11438 bridge = pci_get_device(pci_id->vendor,
11439 pci_id->device,
11440 bridge);
11441 if (!bridge) {
11442 pci_id++;
11443 continue;
11444 }
11445 if (bridge->subordinate &&
11446 (bridge->subordinate->number <=
11447 tp->pdev->bus->number) &&
11448 (bridge->subordinate->subordinate >=
11449 tp->pdev->bus->number)) {
11450 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11451 pci_dev_put(bridge);
11452 break;
11453 }
11454 }
11455 }
11456
Michael Chan4a29cc22006-03-19 13:21:12 -080011457 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
 11458	 * DMA addresses > 40-bit. This bridge may have additional
11459 * 57xx devices behind it in some 4-port NIC designs for example.
11460 * Any tg3 device found behind the bridge will also need the 40-bit
11461 * DMA workaround.
11462 */
Michael Chana4e2b342005-10-26 15:46:52 -070011463 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11464 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11465 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
Michael Chan4a29cc22006-03-19 13:21:12 -080011466 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
Michael Chan4cf78e42005-07-25 12:29:19 -070011467 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
Michael Chana4e2b342005-10-26 15:46:52 -070011468 }
Michael Chan4a29cc22006-03-19 13:21:12 -080011469 else {
11470 struct pci_dev *bridge = NULL;
11471
11472 do {
11473 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11474 PCI_DEVICE_ID_SERVERWORKS_EPB,
11475 bridge);
11476 if (bridge && bridge->subordinate &&
11477 (bridge->subordinate->number <=
11478 tp->pdev->bus->number) &&
11479 (bridge->subordinate->subordinate >=
11480 tp->pdev->bus->number)) {
11481 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11482 pci_dev_put(bridge);
11483 break;
11484 }
11485 } while (bridge);
11486 }
Michael Chan4cf78e42005-07-25 12:29:19 -070011487
Linus Torvalds1da177e2005-04-16 15:20:36 -070011488 /* Initialize misc host control in PCI block. */
11489 tp->misc_host_ctrl |= (misc_ctrl_reg &
11490 MISC_HOST_CTRL_CHIPREV);
11491 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11492 tp->misc_host_ctrl);
11493
11494 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11495 &cacheline_sz_reg);
11496
11497 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11498 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11499 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11500 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11501
Michael Chan7544b092007-05-05 13:08:32 -070011502 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11503 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11504 tp->pdev_peer = tg3_find_peer(tp);
11505
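	/* The 5750/5752/5755/5787/5784/5761/5906 and the 5780-class parts
	 * are grouped as 5750_PLUS; 5705 and 5750_PLUS together make up
	 * 5705_PLUS below.
	 */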
John W. Linville2052da92005-04-21 16:56:08 -070011506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
Michael Chan4cf78e42005-07-25 12:29:19 -070011507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
Michael Chanaf36e6b2006-03-23 01:28:06 -080011508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chand9ab5ad2006-03-20 22:27:35 -080011509 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011510 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
Michael Chana4e2b342005-10-26 15:46:52 -070011513 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
John W. Linville6708e5c2005-04-21 17:00:52 -070011514 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11515
John W. Linville1b440c562005-04-21 17:03:18 -070011516 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11517 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11518 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11519
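	/* 5750 and newer parts support MSI (with a few early revisions
	 * excepted below) and provide hardware TSO in one of two flavors.
	 */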
Michael Chan5a6f3072006-03-20 22:28:05 -080011520 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
Michael Chan7544b092007-05-05 13:08:32 -070011521 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11522 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11523 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11524 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11525 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11526 tp->pdev_peer == tp->pdev))
11527 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11528
Michael Chanaf36e6b2006-03-23 01:28:06 -080011529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
Michael Chan5a6f3072006-03-20 22:28:05 -080011534 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
Michael Chanfcfa0a32006-03-20 22:28:41 -080011535 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
Michael Chan52c0fd82006-06-29 20:15:54 -070011536 } else {
Michael Chan7f62ad52007-02-20 23:25:40 -080011537 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070011538 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11539 ASIC_REV_5750 &&
11540 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
Michael Chan7f62ad52007-02-20 23:25:40 -080011541 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
Michael Chan52c0fd82006-06-29 20:15:54 -070011542 }
Michael Chan5a6f3072006-03-20 22:28:05 -080011543 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011544
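	/* All chips other than those excluded below are jumbo-frame
	 * capable.
	 */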
Michael Chan0f893dc2005-07-25 12:30:38 -070011545 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11546 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
Michael Chand9ab5ad2006-03-20 22:27:35 -080011547 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
Michael Chanaf36e6b2006-03-23 01:28:06 -080011548 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
Michael Chanb5d37722006-09-27 16:06:21 -070011549 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
Matt Carlsond30cdd22007-10-07 23:28:35 -070011550 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
Matt Carlson9936bcf2007-10-10 18:03:07 -070011551 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
Michael Chanb5d37722006-09-27 16:06:21 -070011552 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Michael Chan0f893dc2005-07-25 12:30:38 -070011553 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11554
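	/* On PCI Express devices, raise the maximum read request size to
	 * 4096 bytes.  Hardware TSO is disabled on the 5906 when CLKREQ
	 * is enabled on the link.
	 */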
Michael Chanc7835a72006-11-15 21:14:42 -080011555 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11556 if (pcie_cap != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011557 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
Matt Carlson5f5c51e2007-11-12 21:19:37 -080011558
11559 pcie_set_readrq(tp->pdev, 4096);
11560
Michael Chanc7835a72006-11-15 21:14:42 -080011561 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11562 u16 lnkctl;
11563
11564 pci_read_config_word(tp->pdev,
11565 pcie_cap + PCI_EXP_LNKCTL,
11566 &lnkctl);
11567 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11568 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11569 }
11570 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011571
Michael Chan399de502005-10-03 14:02:39 -070011572	/* On AMD 762 and VIA K8T800 chipsets, write reordering of
 11573	 * mailbox register writes done by the host controller can
 11574	 * cause serious problems.  We read back from every mailbox
 11575	 * register write to force the writes to be posted to the
 11576	 * chip in order.
 11577	 */
11578 if (pci_dev_present(write_reorder_chipsets) &&
11579 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11580 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11581
Linus Torvalds1da177e2005-04-16 15:20:36 -070011582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11583 tp->pci_lat_timer < 64) {
11584 tp->pci_lat_timer = 64;
11585
11586 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11587 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11588 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11589 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11590
11591 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11592 cacheline_sz_reg);
11593 }
11594
Matt Carlson9974a352007-10-07 23:27:28 -070011595 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11596 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11597 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11598 if (!tp->pcix_cap) {
11599 printk(KERN_ERR PFX "Cannot find PCI-X "
11600 "capability, aborting.\n");
11601 return -EIO;
11602 }
11603 }
11604
Linus Torvalds1da177e2005-04-16 15:20:36 -070011605 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11606 &pci_state_reg);
11607
Matt Carlson9974a352007-10-07 23:27:28 -070011608 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070011609 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11610
11611 /* If this is a 5700 BX chipset, and we are in PCI-X
11612 * mode, enable register write workaround.
11613 *
11614 * The workaround is to use indirect register accesses
11615 * for all chip writes not to mailbox registers.
11616 */
11617 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11618 u32 pm_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011619
11620 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11621
 11622			/* The chip can have its power management PCI config
11623 * space registers clobbered due to this bug.
11624 * So explicitly force the chip into D0 here.
11625 */
Matt Carlson9974a352007-10-07 23:27:28 -070011626 pci_read_config_dword(tp->pdev,
11627 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070011628 &pm_reg);
11629 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11630 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
Matt Carlson9974a352007-10-07 23:27:28 -070011631 pci_write_config_dword(tp->pdev,
11632 tp->pm_cap + PCI_PM_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -070011633 pm_reg);
11634
11635 /* Also, force SERR#/PERR# in PCI command. */
11636 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11637 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11638 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11639 }
11640 }
11641
Michael Chan087fe252005-08-09 20:17:41 -070011642 /* 5700 BX chips need to have their TX producer index mailboxes
 11643	 * written twice to work around a bug.
11644 */
11645 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11646 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11647
Linus Torvalds1da177e2005-04-16 15:20:36 -070011648 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11649 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11650 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11651 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11652
11653 /* Chip-specific fixup from Broadcom driver */
11654 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11655 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11656 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11657 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11658 }
11659
Michael Chan1ee582d2005-08-09 20:16:46 -070011660 /* Default fast path register access methods */
Michael Chan20094932005-08-09 20:16:32 -070011661 tp->read32 = tg3_read32;
Michael Chan1ee582d2005-08-09 20:16:46 -070011662 tp->write32 = tg3_write32;
Michael Chan09ee9292005-08-09 20:17:00 -070011663 tp->read32_mbox = tg3_read32;
Michael Chan20094932005-08-09 20:16:32 -070011664 tp->write32_mbox = tg3_write32;
Michael Chan1ee582d2005-08-09 20:16:46 -070011665 tp->write32_tx_mbox = tg3_write32;
11666 tp->write32_rx_mbox = tg3_write32;
11667
11668 /* Various workaround register access methods */
11669 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11670 tp->write32 = tg3_write_indirect_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070011671 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11672 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11673 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11674 /*
11675 * Back to back register writes can cause problems on these
11676 * chips, the workaround is to read back all reg writes
11677 * except those to mailbox regs.
11678 *
11679 * See tg3_write_indirect_reg32().
11680 */
Michael Chan1ee582d2005-08-09 20:16:46 -070011681 tp->write32 = tg3_write_flush_reg32;
Matt Carlson98efd8a2007-05-05 12:47:25 -070011682 }
11683
Michael Chan1ee582d2005-08-09 20:16:46 -070011684
11685 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11686 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11687 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11688 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11689 tp->write32_rx_mbox = tg3_write_flush_reg32;
11690 }
Michael Chan20094932005-08-09 20:16:32 -070011691
Michael Chan68929142005-08-09 20:17:14 -070011692 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11693 tp->read32 = tg3_read_indirect_reg32;
11694 tp->write32 = tg3_write_indirect_reg32;
11695 tp->read32_mbox = tg3_read_indirect_mbox;
11696 tp->write32_mbox = tg3_write_indirect_mbox;
11697 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11698 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11699
11700 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070011701 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070011702
11703 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11704 pci_cmd &= ~PCI_COMMAND_MEMORY;
11705 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11706 }
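	/* The 5906 needs its own mailbox access helpers. */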
Michael Chanb5d37722006-09-27 16:06:21 -070011707 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11708 tp->read32_mbox = tg3_read32_mbox_5906;
11709 tp->write32_mbox = tg3_write32_mbox_5906;
11710 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11711 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11712 }
Michael Chan68929142005-08-09 20:17:14 -070011713
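	/* SRAM must be reached through PCI config cycles when register
	 * writes are indirect, or on a 5700/5701 running in PCI-X mode.
	 */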
Michael Chanbbadf502006-04-06 21:46:34 -070011714 if (tp->write32 == tg3_write_indirect_reg32 ||
11715 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11716 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
David S. Millerf49639e2006-06-09 11:58:36 -070011717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
Michael Chanbbadf502006-04-06 21:46:34 -070011718 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11719
Michael Chan7d0c41e2005-04-21 17:06:20 -070011720 /* Get eeprom hw config before calling tg3_set_power_state().
Michael Chan9d26e212006-12-07 00:21:14 -080011721 * In particular, the TG3_FLG2_IS_NIC flag must be
Michael Chan7d0c41e2005-04-21 17:06:20 -070011722 * determined before calling tg3_set_power_state() so that
11723 * we know whether or not to switch out of Vaux power.
11724 * When the flag is set, it means that GPIO1 is used for eeprom
11725 * write protect and also implies that it is a LOM where GPIOs
11726 * are not used to switch power.
Jeff Garzik6aa20a22006-09-13 13:24:59 -040011727 */
Michael Chan7d0c41e2005-04-21 17:06:20 -070011728 tg3_get_eeprom_hw_cfg(tp);
11729
Matt Carlson0d3031d2007-10-10 18:02:43 -070011730 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11731 /* Allow reads and writes to the
11732 * APE register and memory space.
11733 */
11734 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11735 PCISTATE_ALLOW_APE_SHMEM_WR;
11736 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11737 pci_state_reg);
11738 }
11739
Matt Carlson9936bcf2007-10-10 18:03:07 -070011740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlsonb5af7122007-11-12 21:22:02 -080011741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Matt Carlsond30cdd22007-10-07 23:28:35 -070011742 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11743
Matt Carlsonb5af7122007-11-12 21:22:02 -080011744 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11745 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11746 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11747 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11748 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11749 }
11750
Michael Chan314fba32005-04-21 17:07:04 -070011751 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11752 * GPIO1 driven high will bring 5700's external PHY out of reset.
11753 * It is also used as eeprom write protect on LOMs.
11754 */
11755 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11756 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11757 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11758 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11759 GRC_LCLCTRL_GPIO_OUTPUT1);
Michael Chan3e7d83b2005-04-21 17:10:36 -070011760 /* Unused GPIO3 must be driven as output on 5752 because there
11761 * are no pull-up resistors on unused GPIO pins.
11762 */
11763 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11764 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
Michael Chan314fba32005-04-21 17:07:04 -070011765
Michael Chanaf36e6b2006-03-23 01:28:06 -080011766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11767 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11768
Linus Torvalds1da177e2005-04-16 15:20:36 -070011769 /* Force the chip into D0. */
Michael Chanbc1c7562006-03-20 17:48:03 -080011770 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011771 if (err) {
11772 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11773 pci_name(tp->pdev));
11774 return err;
11775 }
11776
11777 /* 5700 B0 chips do not support checksumming correctly due
11778 * to hardware bugs.
11779 */
11780 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11781 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11782
Linus Torvalds1da177e2005-04-16 15:20:36 -070011783 /* Derive initial jumbo mode from MTU assigned in
11784 * ether_setup() via the alloc_etherdev() call
11785 */
Michael Chan0f893dc2005-07-25 12:30:38 -070011786 if (tp->dev->mtu > ETH_DATA_LEN &&
Michael Chana4e2b342005-10-26 15:46:52 -070011787 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
Michael Chan0f893dc2005-07-25 12:30:38 -070011788 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011789
11790 /* Determine WakeOnLan speed to use. */
11791 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11792 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11793 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11794 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11795 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11796 } else {
11797 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11798 }
11799
11800 /* A few boards don't want Ethernet@WireSpeed phy feature */
11801 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11802 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11803 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
Michael Chan747e8f82005-07-25 12:33:22 -070011804 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070011805 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
Michael Chan747e8f82005-07-25 12:33:22 -070011806 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
Linus Torvalds1da177e2005-04-16 15:20:36 -070011807 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11808
11809 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11810 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11811 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11812 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11813 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11814
Michael Chanc424cb22006-04-29 18:56:34 -070011815 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11816 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011818 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11819 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
Michael Chand4011ad2007-02-13 12:17:25 -080011820 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11821 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11822 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
Michael Chanc1d2a192007-01-08 19:57:20 -080011823 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11824 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11825 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
Michael Chanc424cb22006-04-29 18:56:34 -070011826 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11827 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070011828
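	/* Non-AX 5784 revisions keep the PHY configuration in OTP; fall
	 * back to a default value if the OTP block reads as zero.
	 */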
Matt Carlsonb2a5c192008-04-03 21:44:44 -070011829 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11830 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11831 tp->phy_otp = tg3_read_otp_phycfg(tp);
11832 if (tp->phy_otp == 0)
11833 tp->phy_otp = TG3_OTP_DEFAULT;
11834 }
11835
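	/* The 5784 and 5761 run the MII management (MDIO) interface from
	 * a constant 500 kHz clock; other chips use the standard base mode.
	 */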
Matt Carlson8ef21422008-05-02 16:47:53 -070011836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11837 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11838 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11839 else
11840 tp->mi_mode = MAC_MI_MODE_BASE;
11841
Linus Torvalds1da177e2005-04-16 15:20:36 -070011842 tp->coalesce_mode = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011843 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11844 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11845 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11846
11847 /* Initialize MAC MI mode, polling disabled. */
11848 tw32_f(MAC_MI_MODE, tp->mi_mode);
11849 udelay(80);
11850
11851 /* Initialize data/descriptor byte/word swapping. */
11852 val = tr32(GRC_MODE);
11853 val &= GRC_MODE_HOST_STACKUP;
11854 tw32(GRC_MODE, val | tp->grc_mode);
11855
11856 tg3_switch_clocks(tp);
11857
11858 /* Clear this out for sanity. */
11859 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11860
11861 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11862 &pci_state_reg);
11863 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11864 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11865 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11866
11867 if (chiprevid == CHIPREV_ID_5701_A0 ||
11868 chiprevid == CHIPREV_ID_5701_B0 ||
11869 chiprevid == CHIPREV_ID_5701_B2 ||
11870 chiprevid == CHIPREV_ID_5701_B5) {
11871 void __iomem *sram_base;
11872
11873 /* Write some dummy words into the SRAM status block
 11874			 * area and see whether they read back correctly.  If the
 11875			 * readback is bad, force-enable the PCI-X workaround.
11876 */
11877 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11878
11879 writel(0x00000000, sram_base);
11880 writel(0x00000000, sram_base + 4);
11881 writel(0xffffffff, sram_base + 4);
11882 if (readl(sram_base) != 0x00000000)
11883 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11884 }
11885 }
11886
11887 udelay(50);
11888 tg3_nvram_init(tp);
11889
11890 grc_misc_cfg = tr32(GRC_MISC_CFG);
11891 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11892
Linus Torvalds1da177e2005-04-16 15:20:36 -070011893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11894 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11895 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11896 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11897
David S. Millerfac9b832005-05-18 22:46:34 -070011898 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11899 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11900 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11901 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11902 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11903 HOSTCC_MODE_CLRTICK_TXBD);
11904
11905 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11906 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11907 tp->misc_host_ctrl);
11908 }
11909
Linus Torvalds1da177e2005-04-16 15:20:36 -070011910	/* These devices support 10/100 operation only. */
11911 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11912 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11913 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11914 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11915 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11916 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11917 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11918 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11919 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
Michael Chan676917d2006-12-07 00:20:22 -080011920 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11921 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
Michael Chanb5d37722006-09-27 16:06:21 -070011922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011923 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11924
11925 err = tg3_phy_probe(tp);
11926 if (err) {
11927 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11928 pci_name(tp->pdev), err);
11929 /* ... but do not return immediately ... */
11930 }
11931
11932 tg3_read_partno(tp);
Michael Chanc4e65752006-03-20 22:29:32 -080011933 tg3_read_fw_ver(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070011934
11935 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11936 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11937 } else {
11938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11939 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11940 else
11941 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11942 }
11943
11944 /* 5700 {AX,BX} chips have a broken status block link
11945 * change bit implementation, so we must use the
11946 * status register in those cases.
11947 */
11948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11949 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11950 else
11951 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11952
11953 /* The led_ctrl is set during tg3_phy_probe, here we might
11954 * have to force the link status polling mechanism based
11955 * upon subsystem IDs.
11956 */
11957 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
Michael Chan007a880d2007-05-31 14:49:51 -070011958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -070011959 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11960 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11961 TG3_FLAG_USE_LINKCHG_REG);
11962 }
11963
11964 /* For all SERDES we poll the MAC status register. */
11965 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11966 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11967 else
11968 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11969
Michael Chan5a6f3072006-03-20 22:28:05 -080011970 /* All chips before 5787 can get confused if TX buffers
Linus Torvalds1da177e2005-04-16 15:20:36 -070011971 * straddle the 4GB address boundary in some cases.
11972 */
Michael Chanaf36e6b2006-03-23 01:28:06 -080011973 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
Matt Carlsond30cdd22007-10-07 23:28:35 -070011975 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
Matt Carlson9936bcf2007-10-10 18:03:07 -070011976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
Michael Chanb5d37722006-09-27 16:06:21 -070011977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
Michael Chan5a6f3072006-03-20 22:28:05 -080011978 tp->dev->hard_start_xmit = tg3_start_xmit;
11979 else
11980 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
Linus Torvalds1da177e2005-04-16 15:20:36 -070011981
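	/* Reserve 2 bytes at the start of rx buffers so the IP header is
	 * aligned.  The 5701 in PCI-X mode cannot handle the resulting
	 * unaligned DMA, so no offset is used there.
	 */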
11982 tp->rx_offset = 2;
11983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11984 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11985 tp->rx_offset = 0;
11986
Michael Chanf92905d2006-06-29 20:14:29 -070011987 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11988
11989 /* Increment the rx prod index on the rx std ring by at most
 11990	 * 8 for these chips to work around hw errata.
11991 */
11992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11995 tp->rx_std_max_post = 8;
11996
Matt Carlson8ed5d972007-05-07 00:25:49 -070011997 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11998 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11999 PCIE_PWR_MGMT_L1_THRESH_MSK;
12000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012001 return err;
12002}
12003
David S. Miller49b6e95f2007-03-29 01:38:42 -070012004#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012005static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12006{
12007 struct net_device *dev = tp->dev;
12008 struct pci_dev *pdev = tp->pdev;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012009 struct device_node *dp = pci_device_to_OF_node(pdev);
David S. Miller374d4ca2007-03-29 01:57:57 -070012010 const unsigned char *addr;
David S. Miller49b6e95f2007-03-29 01:38:42 -070012011 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012012
David S. Miller49b6e95f2007-03-29 01:38:42 -070012013 addr = of_get_property(dp, "local-mac-address", &len);
12014 if (addr && len == 6) {
12015 memcpy(dev->dev_addr, addr, 6);
12016 memcpy(dev->perm_addr, dev->dev_addr, 6);
12017 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012018 }
12019 return -ENODEV;
12020}
12021
12022static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12023{
12024 struct net_device *dev = tp->dev;
12025
12026 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
John W. Linville2ff43692005-09-12 14:44:20 -070012027 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012028 return 0;
12029}
12030#endif
12031
12032static int __devinit tg3_get_device_address(struct tg3 *tp)
12033{
12034 struct net_device *dev = tp->dev;
12035 u32 hi, lo, mac_offset;
Michael Chan008652b2006-03-27 23:14:53 -080012036 int addr_ok = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012037
David S. Miller49b6e95f2007-03-29 01:38:42 -070012038#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012039 if (!tg3_get_macaddr_sparc(tp))
12040 return 0;
12041#endif
12042
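	/* The MAC address offset within NVRAM depends on the chip type
	 * and, on dual-MAC parts, on which MAC this function is.
	 */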
12043 mac_offset = 0x7c;
David S. Millerf49639e2006-06-09 11:58:36 -070012044 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
Michael Chana4e2b342005-10-26 15:46:52 -070012045 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012046 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12047 mac_offset = 0xcc;
12048 if (tg3_nvram_lock(tp))
12049 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12050 else
12051 tg3_nvram_unlock(tp);
12052 }
Michael Chanb5d37722006-09-27 16:06:21 -070012053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12054 mac_offset = 0x10;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012055
12056 /* First try to get it from MAC address mailbox. */
12057 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12058 if ((hi >> 16) == 0x484b) {
12059 dev->dev_addr[0] = (hi >> 8) & 0xff;
12060 dev->dev_addr[1] = (hi >> 0) & 0xff;
12061
12062 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12063 dev->dev_addr[2] = (lo >> 24) & 0xff;
12064 dev->dev_addr[3] = (lo >> 16) & 0xff;
12065 dev->dev_addr[4] = (lo >> 8) & 0xff;
12066 dev->dev_addr[5] = (lo >> 0) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012067
Michael Chan008652b2006-03-27 23:14:53 -080012068 /* Some old bootcode may report a 0 MAC address in SRAM */
12069 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12070 }
12071 if (!addr_ok) {
12072 /* Next, try NVRAM. */
David S. Millerf49639e2006-06-09 11:58:36 -070012073 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
Michael Chan008652b2006-03-27 23:14:53 -080012074 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12075 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12076 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12077 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12078 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12079 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12080 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12081 }
12082 /* Finally just fetch it out of the MAC control regs. */
12083 else {
12084 hi = tr32(MAC_ADDR_0_HIGH);
12085 lo = tr32(MAC_ADDR_0_LOW);
12086
12087 dev->dev_addr[5] = lo & 0xff;
12088 dev->dev_addr[4] = (lo >> 8) & 0xff;
12089 dev->dev_addr[3] = (lo >> 16) & 0xff;
12090 dev->dev_addr[2] = (lo >> 24) & 0xff;
12091 dev->dev_addr[1] = hi & 0xff;
12092 dev->dev_addr[0] = (hi >> 8) & 0xff;
12093 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012094 }
12095
12096 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
David S. Miller7582a332008-03-20 15:53:15 -070012097#ifdef CONFIG_SPARC
Linus Torvalds1da177e2005-04-16 15:20:36 -070012098 if (!tg3_get_default_macaddr_sparc(tp))
12099 return 0;
12100#endif
12101 return -EINVAL;
12102 }
John W. Linville2ff43692005-09-12 14:44:20 -070012103 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012104 return 0;
12105}
12106
David S. Miller59e6b432005-05-18 22:50:10 -070012107#define BOUNDARY_SINGLE_CACHELINE 1
12108#define BOUNDARY_MULTI_CACHELINE 2
12109
12110static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12111{
12112 int cacheline_size;
12113 u8 byte;
12114 int goal;
12115
12116 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12117 if (byte == 0)
12118 cacheline_size = 1024;
12119 else
12120 cacheline_size = (int) byte * 4;
12121
12122 /* On 5703 and later chips, the boundary bits have no
12123 * effect.
12124 */
12125 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12126 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12127 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12128 goto out;
12129
12130#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12131 goal = BOUNDARY_MULTI_CACHELINE;
12132#else
12133#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12134 goal = BOUNDARY_SINGLE_CACHELINE;
12135#else
12136 goal = 0;
12137#endif
12138#endif
12139
12140 if (!goal)
12141 goto out;
12142
12143 /* PCI controllers on most RISC systems tend to disconnect
12144 * when a device tries to burst across a cache-line boundary.
12145 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12146 *
12147 * Unfortunately, for PCI-E there are only limited
12148 * write-side controls for this, and thus for reads
12149 * we will still get the disconnects. We'll also waste
12150 * these PCI cycles for both read and write for chips
12151 * other than 5700 and 5701 which do not implement the
12152 * boundary bits.
12153 */
12154 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12155 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12156 switch (cacheline_size) {
12157 case 16:
12158 case 32:
12159 case 64:
12160 case 128:
12161 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12162 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12163 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12164 } else {
12165 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12166 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12167 }
12168 break;
12169
12170 case 256:
12171 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12172 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12173 break;
12174
12175 default:
12176 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12177 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12178 break;
 12179		}
12180 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12181 switch (cacheline_size) {
12182 case 16:
12183 case 32:
12184 case 64:
12185 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12186 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12187 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12188 break;
12189 }
12190 /* fallthrough */
12191 case 128:
12192 default:
12193 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12194 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12195 break;
 12196		}
12197 } else {
12198 switch (cacheline_size) {
12199 case 16:
12200 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12201 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12202 DMA_RWCTRL_WRITE_BNDRY_16);
12203 break;
12204 }
12205 /* fallthrough */
12206 case 32:
12207 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12208 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12209 DMA_RWCTRL_WRITE_BNDRY_32);
12210 break;
12211 }
12212 /* fallthrough */
12213 case 64:
12214 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12215 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12216 DMA_RWCTRL_WRITE_BNDRY_64);
12217 break;
12218 }
12219 /* fallthrough */
12220 case 128:
12221 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12222 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12223 DMA_RWCTRL_WRITE_BNDRY_128);
12224 break;
12225 }
12226 /* fallthrough */
12227 case 256:
12228 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12229 DMA_RWCTRL_WRITE_BNDRY_256);
12230 break;
12231 case 512:
12232 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12233 DMA_RWCTRL_WRITE_BNDRY_512);
12234 break;
12235 case 1024:
12236 default:
12237 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12238 DMA_RWCTRL_WRITE_BNDRY_1024);
12239 break;
 12240		}
12241 }
12242
12243out:
12244 return val;
12245}
12246
Linus Torvalds1da177e2005-04-16 15:20:36 -070012247static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12248{
12249 struct tg3_internal_buffer_desc test_desc;
12250 u32 sram_dma_descs;
12251 int i, ret;
12252
12253 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12254
12255 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12256 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12257 tw32(RDMAC_STATUS, 0);
12258 tw32(WDMAC_STATUS, 0);
12259
12260 tw32(BUFMGR_MODE, 0);
12261 tw32(FTQ_RESET, 0);
12262
12263 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12264 test_desc.addr_lo = buf_dma & 0xffffffff;
12265 test_desc.nic_mbuf = 0x00002100;
12266 test_desc.len = size;
12267
12268 /*
 12269	 * HP ZX1 systems were seeing test failures on 5701 cards running
 12270	 * at 33MHz the *second* time the tg3 driver was loaded after an
12271 * initial scan.
12272 *
12273 * Broadcom tells me:
12274 * ...the DMA engine is connected to the GRC block and a DMA
12275 * reset may affect the GRC block in some unpredictable way...
12276 * The behavior of resets to individual blocks has not been tested.
12277 *
12278 * Broadcom noted the GRC reset will also reset all sub-components.
12279 */
12280 if (to_device) {
12281 test_desc.cqid_sqid = (13 << 8) | 2;
12282
12283 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12284 udelay(40);
12285 } else {
12286 test_desc.cqid_sqid = (16 << 8) | 7;
12287
12288 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12289 udelay(40);
12290 }
12291 test_desc.flags = 0x00000005;
12292
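	/* Copy the test descriptor into NIC SRAM one word at a time
	 * through the PCI memory window.
	 */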
12293 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12294 u32 val;
12295
12296 val = *(((u32 *)&test_desc) + i);
12297 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12298 sram_dma_descs + (i * sizeof(u32)));
12299 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12300 }
12301 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12302
12303 if (to_device) {
12304 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12305 } else {
12306 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12307 }
12308
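	/* Poll the completion FIFO for up to 4 ms (40 iterations of
	 * 100 us) waiting for the descriptor index to appear.
	 */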
12309 ret = -ENODEV;
12310 for (i = 0; i < 40; i++) {
12311 u32 val;
12312
12313 if (to_device)
12314 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12315 else
12316 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12317 if ((val & 0xffff) == sram_dma_descs) {
12318 ret = 0;
12319 break;
12320 }
12321
12322 udelay(100);
12323 }
12324
12325 return ret;
12326}
12327
David S. Millerded73402005-05-23 13:59:47 -070012328#define TEST_BUFFER_SIZE 0x2000
Linus Torvalds1da177e2005-04-16 15:20:36 -070012329
12330static int __devinit tg3_test_dma(struct tg3 *tp)
12331{
12332 dma_addr_t buf_dma;
David S. Miller59e6b432005-05-18 22:50:10 -070012333 u32 *buf, saved_dma_rwctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012334 int ret;
12335
12336 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12337 if (!buf) {
12338 ret = -ENOMEM;
12339 goto out_nofree;
12340 }
12341
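	/* Seed dma_rwctrl with the default PCI read/write DMA command
	 * codes; the boundary and watermark bits are filled in below.
	 */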
12342 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12343 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12344
David S. Miller59e6b432005-05-18 22:50:10 -070012345 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012346
12347 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12348 /* DMA read watermark not used on PCIE */
12349 tp->dma_rwctrl |= 0x00180000;
12350 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
Michael Chan85e94ce2005-04-21 17:05:28 -070012351 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012353 tp->dma_rwctrl |= 0x003f0000;
12354 else
12355 tp->dma_rwctrl |= 0x003f000f;
12356 } else {
12357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12359 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
Michael Chan49afdeb2007-02-13 12:17:03 -080012360 u32 read_water = 0x7;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012361
Michael Chan4a29cc22006-03-19 13:21:12 -080012362 /* If the 5704 is behind the EPB bridge, we can
12363 * do the less restrictive ONE_DMA workaround for
12364 * better performance.
12365 */
12366 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12367 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12368 tp->dma_rwctrl |= 0x8000;
12369 else if (ccval == 0x6 || ccval == 0x7)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012370 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12371
Michael Chan49afdeb2007-02-13 12:17:03 -080012372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12373 read_water = 4;
David S. Miller59e6b432005-05-18 22:50:10 -070012374 /* Set bit 23 to enable PCIX hw bug fix */
Michael Chan49afdeb2007-02-13 12:17:03 -080012375 tp->dma_rwctrl |=
12376 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12377 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12378 (1 << 23);
Michael Chan4cf78e42005-07-25 12:29:19 -070012379 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12380 /* 5780 always in PCIX mode */
12381 tp->dma_rwctrl |= 0x00144000;
Michael Chana4e2b342005-10-26 15:46:52 -070012382 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12383 /* 5714 always in PCIX mode */
12384 tp->dma_rwctrl |= 0x00148000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012385 } else {
12386 tp->dma_rwctrl |= 0x001b000f;
12387 }
12388 }
12389
12390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12392 tp->dma_rwctrl &= 0xfffffff0;
12393
12394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12396 /* Remove this if it causes problems for some boards. */
12397 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12398
12399 /* On 5700/5701 chips, we need to set this bit.
12400 * Otherwise the chip will issue cacheline transactions
12401 * to streamable DMA memory with not all the byte
12402 * enables turned on. This is an error on several
12403 * RISC PCI controllers, in particular sparc64.
12404 *
12405 * On 5703/5704 chips, this bit has been reassigned
12406 * a different meaning. In particular, it is used
12407 * on those chips to enable a PCI-X workaround.
12408 */
12409 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12410 }
12411
12412 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12413
12414#if 0
12415 /* Unneeded, already done by tg3_get_invariants. */
12416 tg3_switch_clocks(tp);
12417#endif
12418
12419 ret = 0;
12420 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12421 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12422 goto out;
12423
David S. Miller59e6b432005-05-18 22:50:10 -070012424	/* It is best to perform the DMA test with the maximum write burst
 12425	 * size to expose the 5700/5701 write DMA bug.
12426 */
12427 saved_dma_rwctrl = tp->dma_rwctrl;
12428 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12429 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12430
Linus Torvalds1da177e2005-04-16 15:20:36 -070012431 while (1) {
12432 u32 *p = buf, i;
12433
12434 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12435 p[i] = i;
12436
12437 /* Send the buffer to the chip. */
12438 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12439 if (ret) {
 12440			printk(KERN_ERR "tg3_test_dma() write to the buffer failed, err %d\n", ret);
12441 break;
12442 }
12443
12444#if 0
12445 /* validate data reached card RAM correctly. */
12446 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12447 u32 val;
12448 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12449 if (le32_to_cpu(val) != p[i]) {
12450 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12451 /* ret = -ENODEV here? */
12452 }
12453 p[i] = 0;
12454 }
12455#endif
12456 /* Now read it back. */
12457 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12458 if (ret) {
 12459			printk(KERN_ERR "tg3_test_dma() read from the buffer failed, err %d\n", ret);
12460
12461 break;
12462 }
12463
12464 /* Verify it. */
12465 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12466 if (p[i] == i)
12467 continue;
12468
David S. Miller59e6b432005-05-18 22:50:10 -070012469 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12470 DMA_RWCTRL_WRITE_BNDRY_16) {
12471 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012472 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12473 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12474 break;
12475 } else {
12476 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12477 ret = -ENODEV;
12478 goto out;
12479 }
12480 }
12481
12482 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12483 /* Success. */
12484 ret = 0;
12485 break;
12486 }
12487 }
David S. Miller59e6b432005-05-18 22:50:10 -070012488 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12489 DMA_RWCTRL_WRITE_BNDRY_16) {
Michael Chan6d1cfba2005-06-08 14:13:14 -070012490 static struct pci_device_id dma_wait_state_chipsets[] = {
12491 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12492 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12493 { },
12494 };
12495
David S. Miller59e6b432005-05-18 22:50:10 -070012496 /* DMA test passed without adjusting DMA boundary,
Michael Chan6d1cfba2005-06-08 14:13:14 -070012497 * now look for chipsets that are known to expose the
12498 * DMA bug without failing the test.
David S. Miller59e6b432005-05-18 22:50:10 -070012499 */
Michael Chan6d1cfba2005-06-08 14:13:14 -070012500 if (pci_dev_present(dma_wait_state_chipsets)) {
12501 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12502 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12503 }
12504 else
12505 /* Safe to use the calculated DMA boundary. */
12506 tp->dma_rwctrl = saved_dma_rwctrl;
12507
David S. Miller59e6b432005-05-18 22:50:10 -070012508 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12509 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012510
12511out:
12512 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12513out_nofree:
12514 return ret;
12515}
12516
12517static void __devinit tg3_init_link_config(struct tg3 *tp)
12518{
12519 tp->link_config.advertising =
12520 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12521 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12522 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12523 ADVERTISED_Autoneg | ADVERTISED_MII);
12524 tp->link_config.speed = SPEED_INVALID;
12525 tp->link_config.duplex = DUPLEX_INVALID;
12526 tp->link_config.autoneg = AUTONEG_ENABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012527 tp->link_config.active_speed = SPEED_INVALID;
12528 tp->link_config.active_duplex = DUPLEX_INVALID;
12529 tp->link_config.phy_is_low_power = 0;
12530 tp->link_config.orig_speed = SPEED_INVALID;
12531 tp->link_config.orig_duplex = DUPLEX_INVALID;
12532 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12533}
12534
12535static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12536{
Michael Chanfdfec172005-07-25 12:31:48 -070012537 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12538 tp->bufmgr_config.mbuf_read_dma_low_water =
12539 DEFAULT_MB_RDMA_LOW_WATER_5705;
12540 tp->bufmgr_config.mbuf_mac_rx_low_water =
12541 DEFAULT_MB_MACRX_LOW_WATER_5705;
12542 tp->bufmgr_config.mbuf_high_water =
12543 DEFAULT_MB_HIGH_WATER_5705;
Michael Chanb5d37722006-09-27 16:06:21 -070012544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12545 tp->bufmgr_config.mbuf_mac_rx_low_water =
12546 DEFAULT_MB_MACRX_LOW_WATER_5906;
12547 tp->bufmgr_config.mbuf_high_water =
12548 DEFAULT_MB_HIGH_WATER_5906;
12549 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012550
Michael Chanfdfec172005-07-25 12:31:48 -070012551 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12552 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12553 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12554 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12555 tp->bufmgr_config.mbuf_high_water_jumbo =
12556 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12557 } else {
12558 tp->bufmgr_config.mbuf_read_dma_low_water =
12559 DEFAULT_MB_RDMA_LOW_WATER;
12560 tp->bufmgr_config.mbuf_mac_rx_low_water =
12561 DEFAULT_MB_MACRX_LOW_WATER;
12562 tp->bufmgr_config.mbuf_high_water =
12563 DEFAULT_MB_HIGH_WATER;
12564
12565 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12566 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12567 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12568 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12569 tp->bufmgr_config.mbuf_high_water_jumbo =
12570 DEFAULT_MB_HIGH_WATER_JUMBO;
12571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012572
12573 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12574 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12575}
12576
12577static char * __devinit tg3_phy_string(struct tg3 *tp)
12578{
12579 switch (tp->phy_id & PHY_ID_MASK) {
12580 case PHY_ID_BCM5400: return "5400";
12581 case PHY_ID_BCM5401: return "5401";
12582 case PHY_ID_BCM5411: return "5411";
12583 case PHY_ID_BCM5701: return "5701";
12584 case PHY_ID_BCM5703: return "5703";
12585 case PHY_ID_BCM5704: return "5704";
12586 case PHY_ID_BCM5705: return "5705";
12587 case PHY_ID_BCM5750: return "5750";
Michael Chan85e94ce2005-04-21 17:05:28 -070012588 case PHY_ID_BCM5752: return "5752";
Michael Chana4e2b342005-10-26 15:46:52 -070012589 case PHY_ID_BCM5714: return "5714";
Michael Chan4cf78e42005-07-25 12:29:19 -070012590 case PHY_ID_BCM5780: return "5780";
Michael Chanaf36e6b2006-03-23 01:28:06 -080012591 case PHY_ID_BCM5755: return "5755";
Michael Chand9ab5ad2006-03-20 22:27:35 -080012592 case PHY_ID_BCM5787: return "5787";
Matt Carlsond30cdd22007-10-07 23:28:35 -070012593 case PHY_ID_BCM5784: return "5784";
Michael Chan126a3362006-09-27 16:03:07 -070012594 case PHY_ID_BCM5756: return "5722/5756";
Michael Chanb5d37722006-09-27 16:06:21 -070012595 case PHY_ID_BCM5906: return "5906";
Matt Carlson9936bcf2007-10-10 18:03:07 -070012596 case PHY_ID_BCM5761: return "5761";
Linus Torvalds1da177e2005-04-16 15:20:36 -070012597 case PHY_ID_BCM8002: return "8002/serdes";
12598 case 0: return "serdes";
12599 default: return "unknown";
 12600	}
12601}
12602
Michael Chanf9804dd2005-09-27 12:13:10 -070012603static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12604{
12605 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12606 strcpy(str, "PCI Express");
12607 return str;
12608 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12609 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12610
12611 strcpy(str, "PCIX:");
12612
12613 if ((clock_ctrl == 7) ||
12614 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12615 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12616 strcat(str, "133MHz");
12617 else if (clock_ctrl == 0)
12618 strcat(str, "33MHz");
12619 else if (clock_ctrl == 2)
12620 strcat(str, "50MHz");
12621 else if (clock_ctrl == 4)
12622 strcat(str, "66MHz");
12623 else if (clock_ctrl == 6)
12624 strcat(str, "100MHz");
Michael Chanf9804dd2005-09-27 12:13:10 -070012625 } else {
12626 strcpy(str, "PCI:");
12627 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12628 strcat(str, "66MHz");
12629 else
12630 strcat(str, "33MHz");
12631 }
12632 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12633 strcat(str, ":32-bit");
12634 else
12635 strcat(str, ":64-bit");
12636 return str;
12637}
12638
Michael Chan8c2dc7e2005-12-19 16:26:02 -080012639static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012640{
12641 struct pci_dev *peer;
12642 unsigned int func, devnr = tp->pdev->devfn & ~7;
12643
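	/* The peer device, if present, is another PCI function in the
	 * same slot.
	 */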
12644 for (func = 0; func < 8; func++) {
12645 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12646 if (peer && peer != tp->pdev)
12647 break;
12648 pci_dev_put(peer);
12649 }
Michael Chan16fe9d72005-12-13 21:09:54 -080012650 /* 5704 can be configured in single-port mode, set peer to
12651 * tp->pdev in that case.
12652 */
12653 if (!peer) {
12654 peer = tp->pdev;
12655 return peer;
12656 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012657
12658 /*
12659 * We don't need to keep the refcount elevated; there's no way
 12660	 * to remove one half of this device without removing the other.
12661 */
12662 pci_dev_put(peer);
12663
12664 return peer;
12665}
12666
David S. Miller15f98502005-05-18 22:49:26 -070012667static void __devinit tg3_init_coal(struct tg3 *tp)
12668{
12669 struct ethtool_coalesce *ec = &tp->coal;
12670
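	/* Default interrupt coalescing parameters; adjusted below when the
	 * chip clears ticks on BD events, and the per-interrupt values are
	 * zeroed on 5705 and newer parts.
	 */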
12671 memset(ec, 0, sizeof(*ec));
12672 ec->cmd = ETHTOOL_GCOALESCE;
12673 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12674 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12675 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12676 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12677 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12678 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12679 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12680 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12681 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12682
12683 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12684 HOSTCC_MODE_CLRTICK_TXBD)) {
12685 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12686 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12687 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12688 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12689 }
Michael Chand244c892005-07-05 14:42:33 -070012690
12691 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12692 ec->rx_coalesce_usecs_irq = 0;
12693 ec->tx_coalesce_usecs_irq = 0;
12694 ec->stats_block_coalesce_usecs = 0;
12695 }
David S. Miller15f98502005-05-18 22:49:26 -070012696}
12697
Linus Torvalds1da177e2005-04-16 15:20:36 -070012698static int __devinit tg3_init_one(struct pci_dev *pdev,
12699 const struct pci_device_id *ent)
12700{
12701 static int tg3_version_printed = 0;
Sergei Shtylyov2de58e32008-04-12 18:30:58 -070012702 resource_size_t tg3reg_base;
12703 unsigned long tg3reg_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012704 struct net_device *dev;
12705 struct tg3 *tp;
Joe Perchesd6645372007-12-20 04:06:59 -080012706 int err, pm_cap;
Michael Chanf9804dd2005-09-27 12:13:10 -070012707 char str[40];
Michael Chan72f2afb2006-03-06 19:28:35 -080012708 u64 dma_mask, persist_dma_mask;
Joe Perchesd6645372007-12-20 04:06:59 -080012709 DECLARE_MAC_BUF(mac);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012710
12711 if (tg3_version_printed++ == 0)
12712 printk(KERN_INFO "%s", version);
12713
12714 err = pci_enable_device(pdev);
12715 if (err) {
12716 printk(KERN_ERR PFX "Cannot enable PCI device, "
12717 "aborting.\n");
12718 return err;
12719 }
12720
12721 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12722 printk(KERN_ERR PFX "Cannot find proper PCI device "
12723 "base address, aborting.\n");
12724 err = -ENODEV;
12725 goto err_out_disable_pdev;
12726 }
12727
12728 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12729 if (err) {
12730 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12731 "aborting.\n");
12732 goto err_out_disable_pdev;
12733 }
12734
12735 pci_set_master(pdev);
12736
12737 /* Find power-management capability. */
12738 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12739 if (pm_cap == 0) {
12740 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12741 "aborting.\n");
12742 err = -EIO;
12743 goto err_out_free_res;
12744 }
12745
Linus Torvalds1da177e2005-04-16 15:20:36 -070012746 tg3reg_base = pci_resource_start(pdev, 0);
12747 tg3reg_len = pci_resource_len(pdev, 0);
12748
12749 dev = alloc_etherdev(sizeof(*tp));
12750 if (!dev) {
12751 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12752 err = -ENOMEM;
12753 goto err_out_free_res;
12754 }
12755
Linus Torvalds1da177e2005-04-16 15:20:36 -070012756 SET_NETDEV_DEV(dev, &pdev->dev);
12757
Linus Torvalds1da177e2005-04-16 15:20:36 -070012758#if TG3_VLAN_TAG_USED
12759 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12760 dev->vlan_rx_register = tg3_vlan_rx_register;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012761#endif
12762
12763 tp = netdev_priv(dev);
12764 tp->pdev = pdev;
12765 tp->dev = dev;
12766 tp->pm_cap = pm_cap;
12767 tp->mac_mode = TG3_DEF_MAC_MODE;
12768 tp->rx_mode = TG3_DEF_RX_MODE;
12769 tp->tx_mode = TG3_DEF_TX_MODE;
Matt Carlson8ef21422008-05-02 16:47:53 -070012770
Linus Torvalds1da177e2005-04-16 15:20:36 -070012771 if (tg3_debug > 0)
12772 tp->msg_enable = tg3_debug;
12773 else
12774 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12775
	/* The word/byte swap controls here affect only register access
	 * byte swapping.  DMA data byte swapping is controlled by the
	 * GRC_MODE setting below.
	 */
12780 tp->misc_host_ctrl =
12781 MISC_HOST_CTRL_MASK_PCI_INT |
12782 MISC_HOST_CTRL_WORD_SWAP |
12783 MISC_HOST_CTRL_INDIR_ACCESS |
12784 MISC_HOST_CTRL_PCISTATE_RW;
12785
12786 /* The NONFRM (non-frame) byte/word swap controls take effect
12787 * on descriptor entries, anything which isn't packet data.
12788 *
12789 * The StrongARM chips on the board (one for tx, one for rx)
12790 * are running in big-endian mode.
12791 */
12792 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12793 GRC_MODE_WSWAP_NONFRM_DATA);
12794#ifdef __BIG_ENDIAN
12795 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12796#endif
12797 spin_lock_init(&tp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012798 spin_lock_init(&tp->indirect_lock);
David Howellsc4028952006-11-22 14:57:56 +000012799 INIT_WORK(&tp->reset_task, tg3_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012800
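	/* Map the register BAR so the tr32()/tw32() register accessors
	 * can reach the chip.
	 */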
12801 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
Andy Gospodarekab0049b2007-09-06 20:42:14 +010012802 if (!tp->regs) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012803 printk(KERN_ERR PFX "Cannot map device registers, "
12804 "aborting.\n");
12805 err = -ENOMEM;
12806 goto err_out_free_dev;
12807 }
12808
12809 tg3_init_link_config(tp);
12810
Linus Torvalds1da177e2005-04-16 15:20:36 -070012811 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12812 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12813 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12814
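	/* Fill in the net_device entry points. */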
12815 dev->open = tg3_open;
12816 dev->stop = tg3_close;
12817 dev->get_stats = tg3_get_stats;
12818 dev->set_multicast_list = tg3_set_rx_mode;
12819 dev->set_mac_address = tg3_set_mac_addr;
12820 dev->do_ioctl = tg3_ioctl;
12821 dev->tx_timeout = tg3_tx_timeout;
Stephen Hemmingerbea33482007-10-03 16:41:36 -070012822 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012823 dev->ethtool_ops = &tg3_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012824 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12825 dev->change_mtu = tg3_change_mtu;
12826 dev->irq = pdev->irq;
12827#ifdef CONFIG_NET_POLL_CONTROLLER
12828 dev->poll_controller = tg3_poll_controller;
12829#endif
12830
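	/* Determine the chip revision, bus type, and the various quirk
	 * and feature flags from PCI config space and NVRAM.
	 */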
12831 err = tg3_get_invariants(tp);
12832 if (err) {
12833 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12834 "aborting.\n");
12835 goto err_out_iounmap;
12836 }
12837
	/* The EPB bridge inside the 5714, 5715, and 5780, and any
	 * device behind the EPB, cannot handle DMA addresses wider
	 * than 40 bits.
	 * On 64-bit systems with an IOMMU, use a 40-bit dma_mask.
	 * On 64-bit systems without an IOMMU, use a 64-bit dma_mask
	 * and do the DMA address check in tg3_start_xmit().
	 */
Michael Chan4a29cc22006-03-19 13:21:12 -080012844 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12845 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12846 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
Michael Chan72f2afb2006-03-06 19:28:35 -080012847 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12848#ifdef CONFIG_HIGHMEM
12849 dma_mask = DMA_64BIT_MASK;
12850#endif
Michael Chan4a29cc22006-03-19 13:21:12 -080012851 } else
Michael Chan72f2afb2006-03-06 19:28:35 -080012852 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12853
12854 /* Configure DMA attributes. */
12855 if (dma_mask > DMA_32BIT_MASK) {
12856 err = pci_set_dma_mask(pdev, dma_mask);
12857 if (!err) {
12858 dev->features |= NETIF_F_HIGHDMA;
12859 err = pci_set_consistent_dma_mask(pdev,
12860 persist_dma_mask);
12861 if (err < 0) {
12862 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12863 "DMA for consistent allocations\n");
12864 goto err_out_iounmap;
12865 }
12866 }
12867 }
12868 if (err || dma_mask == DMA_32BIT_MASK) {
12869 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12870 if (err) {
12871 printk(KERN_ERR PFX "No usable DMA configuration, "
12872 "aborting.\n");
12873 goto err_out_iounmap;
12874 }
12875 }
12876
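	/* Pick the default buffer manager (MBUF pool) watermarks for this
	 * chip; they are written to the hardware when it is initialized.
	 */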
Michael Chanfdfec172005-07-25 12:31:48 -070012877 tg3_init_bufmgr_config(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012878
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
		   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}
12891
Michael Chan4e3a7aa2006-03-20 17:47:44 -080012892 /* TSO is on by default on chips that support hardware TSO.
12893 * Firmware TSO on older chips gives lower performance, so it
12894 * is off by default, but can be enabled using ethtool.
12895 */
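	/* For example, "ethtool -K ethX tso on" (with ethX standing in
	 * for the tg3 interface name) can still turn firmware TSO on for
	 * those older chips.
	 */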
Michael Chanb0026622006-07-03 19:42:14 -070012896 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070012897 dev->features |= NETIF_F_TSO;
Michael Chanb5d37722006-09-27 16:06:21 -070012898 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12899 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
Michael Chanb0026622006-07-03 19:42:14 -070012900 dev->features |= NETIF_F_TSO6;
Matt Carlson9936bcf2007-10-10 18:03:07 -070012901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12902 dev->features |= NETIF_F_TSO_ECN;
Michael Chanb0026622006-07-03 19:42:14 -070012903 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070012904
Linus Torvalds1da177e2005-04-16 15:20:36 -070012905
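	/* 5705 A1 parts that are not TSO capable and sit on a bus that is
	 * not running at high speed can only post 64 receive descriptors.
	 */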
12906 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12907 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12908 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12909 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12910 tp->rx_pending = 63;
12911 }
12912
Linus Torvalds1da177e2005-04-16 15:20:36 -070012913 err = tg3_get_device_address(tp);
12914 if (err) {
12915 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12916 "aborting.\n");
12917 goto err_out_iounmap;
12918 }
12919
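	/* Chips with an Application Processing Engine (APE) expose its
	 * registers through BAR 2; map them and initialize the APE locks.
	 */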
Matt Carlson0d3031d2007-10-10 18:02:43 -070012920 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12921 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12922 printk(KERN_ERR PFX "Cannot find proper PCI device "
12923 "base address for APE, aborting.\n");
12924 err = -ENODEV;
12925 goto err_out_iounmap;
12926 }
12927
12928 tg3reg_base = pci_resource_start(pdev, 2);
12929 tg3reg_len = pci_resource_len(pdev, 2);
12930
12931 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
Al Viro79ea13c2008-01-24 02:06:46 -080012932 if (!tp->aperegs) {
Matt Carlson0d3031d2007-10-10 18:02:43 -070012933 printk(KERN_ERR PFX "Cannot map APE registers, "
12934 "aborting.\n");
12935 err = -ENOMEM;
12936 goto err_out_iounmap;
12937 }
12938
12939 tg3_ape_lock_init(tp);
12940 }
12941
	/*
	 * Reset the chip in case a UNDI or EFI boot driver did not shut
	 * it down cleanly; otherwise the DMA self test will enable WDMAC
	 * and we'll see (spurious) pending DMA on the PCI bus at that
	 * point.
	 */
12947 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12948 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12949 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12950 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12951 }
12952
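	/* Run the DMA engine test to settle on safe DMA read/write control
	 * (dma_rwctrl) settings for this chip and bus.
	 */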
12953 err = tg3_test_dma(tp);
12954 if (err) {
12955 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12956 goto err_out_apeunmap;
12957 }
12958
	/* Tigon3 can checksum IPv4 only on most chips (a few newer ones,
	 * flagged below, also handle IPv6), and some chips have buggy
	 * checksumming.
	 */
12962 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12963 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12968 dev->features |= NETIF_F_IPV6_CSUM;
12969
12970 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12971 } else
12972 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12973
12974 /* flow control autonegotiation is default behavior */
12975 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
Matt Carlson8d018622007-12-20 20:05:44 -080012976 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
Matt Carlsonc88864d2007-11-12 21:07:01 -080012977
12978 tg3_init_coal(tp);
12979
Michael Chanc49a1562006-12-17 17:07:29 -080012980 pci_set_drvdata(pdev, dev);
12981
Linus Torvalds1da177e2005-04-16 15:20:36 -070012982 err = register_netdev(dev);
12983 if (err) {
12984 printk(KERN_ERR PFX "Cannot register net device, "
12985 "aborting.\n");
Matt Carlson0d3031d2007-10-10 18:02:43 -070012986 goto err_out_apeunmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -070012987 }
12988
Joe Perchesd6645372007-12-20 04:06:59 -080012989 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12990 "(%s) %s Ethernet %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070012991 dev->name,
12992 tp->board_part_number,
12993 tp->pci_chip_rev_id,
12994 tg3_phy_string(tp),
Michael Chanf9804dd2005-09-27 12:13:10 -070012995 tg3_bus_string(tp, str),
Michael Chancbb45d22006-12-07 00:24:09 -080012996 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12997 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
Joe Perchesd6645372007-12-20 04:06:59 -080012998 "10/100/1000Base-T")),
12999 print_mac(mac, dev->dev_addr));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013000
13001 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
Michael Chan1c46ae02007-03-24 20:54:37 -070013002 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -070013003 dev->name,
13004 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13005 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13006 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13007 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -070013008 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13009 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
Michael Chan4a29cc22006-03-19 13:21:12 -080013010 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13011 dev->name, tp->dma_rwctrl,
13012 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13013 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
Linus Torvalds1da177e2005-04-16 15:20:36 -070013014
13015 return 0;
13016
Matt Carlson0d3031d2007-10-10 18:02:43 -070013017err_out_apeunmap:
13018 if (tp->aperegs) {
13019 iounmap(tp->aperegs);
13020 tp->aperegs = NULL;
13021 }
13022
Linus Torvalds1da177e2005-04-16 15:20:36 -070013023err_out_iounmap:
Michael Chan68929142005-08-09 20:17:14 -070013024 if (tp->regs) {
13025 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013026 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013027 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013028
13029err_out_free_dev:
13030 free_netdev(dev);
13031
13032err_out_free_res:
13033 pci_release_regions(pdev);
13034
13035err_out_disable_pdev:
13036 pci_disable_device(pdev);
13037 pci_set_drvdata(pdev, NULL);
13038 return err;
13039}
13040
13041static void __devexit tg3_remove_one(struct pci_dev *pdev)
13042{
13043 struct net_device *dev = pci_get_drvdata(pdev);
13044
13045 if (dev) {
13046 struct tg3 *tp = netdev_priv(dev);
13047
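		/* Make sure a pending reset_task has finished before the
		 * net device is unregistered and freed.
		 */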
Michael Chan7faa0062006-02-02 17:29:28 -080013048 flush_scheduled_work();
Linus Torvalds1da177e2005-04-16 15:20:36 -070013049 unregister_netdev(dev);
Matt Carlson0d3031d2007-10-10 18:02:43 -070013050 if (tp->aperegs) {
13051 iounmap(tp->aperegs);
13052 tp->aperegs = NULL;
13053 }
Michael Chan68929142005-08-09 20:17:14 -070013054 if (tp->regs) {
13055 iounmap(tp->regs);
Peter Hagervall22abe312005-09-16 17:01:03 -070013056 tp->regs = NULL;
Michael Chan68929142005-08-09 20:17:14 -070013057 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070013058 free_netdev(dev);
13059 pci_release_regions(pdev);
13060 pci_disable_device(pdev);
13061 pci_set_drvdata(pdev, NULL);
13062 }
13063}
13064
13065static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13066{
13067 struct net_device *dev = pci_get_drvdata(pdev);
13068 struct tg3 *tp = netdev_priv(dev);
13069 int err;
13070
	/* PCI register 4 needs to be saved whether or not netif_running()
	 * is true.  The MSI address and data also need to be saved if MSI
	 * is in use and netif_running() is true.
	 */
13075 pci_save_state(pdev);
13076
Linus Torvalds1da177e2005-04-16 15:20:36 -070013077 if (!netif_running(dev))
13078 return 0;
13079
Michael Chan7faa0062006-02-02 17:29:28 -080013080 flush_scheduled_work();
Linus Torvalds1da177e2005-04-16 15:20:36 -070013081 tg3_netif_stop(tp);
13082
13083 del_timer_sync(&tp->timer);
13084
David S. Millerf47c11e2005-06-24 20:18:35 -070013085 tg3_full_lock(tp, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013086 tg3_disable_ints(tp);
David S. Millerf47c11e2005-06-24 20:18:35 -070013087 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013088
13089 netif_device_detach(dev);
13090
David S. Millerf47c11e2005-06-24 20:18:35 -070013091 tg3_full_lock(tp, 0);
Michael Chan944d9802005-05-29 14:57:48 -070013092 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
Michael Chan6a9eba12005-12-13 21:08:58 -080013093 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
David S. Millerf47c11e2005-06-24 20:18:35 -070013094 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013095
13096 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13097 if (err) {
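		/* Entering the low-power state failed; restart the hardware
		 * so the interface remains usable.
		 */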
David S. Millerf47c11e2005-06-24 20:18:35 -070013098 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013099
Michael Chan6a9eba12005-12-13 21:08:58 -080013100 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013101 if (tg3_restart_hw(tp, 1))
13102 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013103
13104 tp->timer.expires = jiffies + tp->timer_offset;
13105 add_timer(&tp->timer);
13106
13107 netif_device_attach(dev);
13108 tg3_netif_start(tp);
13109
Michael Chanb9ec6c12006-07-25 16:37:27 -070013110out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013111 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013112 }
13113
13114 return err;
13115}
13116
13117static int tg3_resume(struct pci_dev *pdev)
13118{
13119 struct net_device *dev = pci_get_drvdata(pdev);
13120 struct tg3 *tp = netdev_priv(dev);
13121 int err;
13122
Michael Chan3e0c95f2007-08-03 20:56:54 -070013123 pci_restore_state(tp->pdev);
13124
Linus Torvalds1da177e2005-04-16 15:20:36 -070013125 if (!netif_running(dev))
13126 return 0;
13127
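	/* Bring the chip back to full power (D0) before touching any of
	 * its registers.
	 */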
Michael Chanbc1c7562006-03-20 17:48:03 -080013128 err = tg3_set_power_state(tp, PCI_D0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013129 if (err)
13130 return err;
13131
13132 netif_device_attach(dev);
13133
David S. Millerf47c11e2005-06-24 20:18:35 -070013134 tg3_full_lock(tp, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013135
Michael Chan6a9eba12005-12-13 21:08:58 -080013136 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
Michael Chanb9ec6c12006-07-25 16:37:27 -070013137 err = tg3_restart_hw(tp, 1);
13138 if (err)
13139 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013140
13141 tp->timer.expires = jiffies + tp->timer_offset;
13142 add_timer(&tp->timer);
13143
Linus Torvalds1da177e2005-04-16 15:20:36 -070013144 tg3_netif_start(tp);
13145
Michael Chanb9ec6c12006-07-25 16:37:27 -070013146out:
David S. Millerf47c11e2005-06-24 20:18:35 -070013147 tg3_full_unlock(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013148
Michael Chanb9ec6c12006-07-25 16:37:27 -070013149 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -070013150}
13151
13152static struct pci_driver tg3_driver = {
13153 .name = DRV_MODULE_NAME,
13154 .id_table = tg3_pci_tbl,
13155 .probe = tg3_init_one,
13156 .remove = __devexit_p(tg3_remove_one),
13157 .suspend = tg3_suspend,
13158 .resume = tg3_resume
13159};
13160
13161static int __init tg3_init(void)
13162{
Jeff Garzik29917622006-08-19 17:48:59 -040013163 return pci_register_driver(&tg3_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013164}
13165
13166static void __exit tg3_cleanup(void)
13167{
13168 pci_unregister_driver(&tg3_driver);
13169}
13170
13171module_init(tg3_init);
13172module_exit(tg3_cleanup);